// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |    (PDB)    |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
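
/*
 * A minimal sketch (illustrative only, not compiled here) of what that
 * layout amounts to in terms of the desc_constr.h helpers that the
 * init_*_job() routines below actually call:
 *
 *	len = desc_len(sh_desc);
 *	init_job_desc_shared(desc, sh_desc_dma, len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 */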

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
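/*
 * Worked example, assuming the usual mainline header values
 * (AES_MAX_KEY_SIZE = 32, CTR_RFC3686_NONCE_SIZE = 4,
 * SHA512_DIGEST_SIZE = 64): 32 + 4 + 2 * 64 = 164 bytes, enough for a
 * padded HMAC-SHA512 split key followed by an AES key plus nonce.
 */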

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define CHACHAPOLY_DESC_JOB_IO_LEN	(AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};
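
/*
 * Layout of ctx->key for the authenc algorithms, as arranged by
 * aead_setkey() below (sketch; actual sizes depend on the algorithms):
 *
 *	+---------------------------------------+----------------+
 *	| auth split key, padded to keylen_pad  | encryption key |
 *	+---------------------------------------+----------------+
 *	0                                       adata.keylen_pad
 *
 * so cdata.key_virt/key_dma simply point adata.keylen_pad bytes in.
 */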

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}
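
	/*
	 * Worked example: the 64-word buffer is 64 * CAAM_CMD_SZ = 256
	 * bytes, so the split key is inlined only when
	 * 256 - AEAD_DESC_JOB_IO_LEN - adata.keylen_pad still covers the
	 * descriptor commands; otherwise the descriptor carries only a
	 * pointer to ctx->key_dma.
	 */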

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}
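
	/*
	 * Resulting CONTEXT1 byte layout (sketch, offsets in bytes):
	 *
	 *	plain CTR: IV at 16..31
	 *	rfc3686:   nonce at 16..19, IV at 20..27, counter at 28..31
	 *
	 * which is why ctx1_iv_off is 16, respectively
	 * 16 + CTR_RFC3686_NONCE_SIZE (= 20, assuming the usual
	 * 4-byte nonce and 8-byte rfc3686 IV).
	 */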

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm>
	 * would result in invalid opcodes (last bytes of user key) in
	 * the resulting descriptor. Use DKP<ptr,imm> instead => both
	 * virtual and dma key addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
			       false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);

	print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;
	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int err;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int err;

	err = aes_check_keylen(keylen - 4);
	if (err) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int err;

	err = aes_check_keylen(keylen - 4);
	if (err) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* skcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;
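
	/*
	 * e.g. for "rfc3686(ctr(aes))" with AES-128, the 20-byte *key is a
	 * 16-byte AES key followed by the 4-byte nonce (assuming
	 * CTR_RFC3686_NONCE_SIZE = 4), and the IV is later loaded at
	 * CONTEXT1 byte offset 20.
	 */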

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @mapped_src_nents: number of segments in input h/w link table
 * @mapped_dst_nents: number of segments in output h/w link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int mapped_src_nents;
	int mapped_dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
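
/*
 * aead_edesc_alloc() below carves all of this out of one allocation
 * (sketch of the resulting layout):
 *
 *	+--------------------+----------------------+--------------------+
 *	| struct aead_edesc  | hw_desc[desc_bytes]  | sec4_sg link table |
 *	+--------------------+----------------------+--------------------+
 */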

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @mapped_src_nents: number of segments in input h/w link table
 * @mapped_dst_nents: number of segments in output h/w link table
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     and IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	int mapped_src_nents;
	int mapped_dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
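
/*
 * For skciphers the (DMA-mapped) IV additionally sits right after the
 * link table, which is where the completion callbacks below read the
 * output IV from:
 *
 *	iv = (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes;
 */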

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, ecode);
}

static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				  void *context)
{
	struct skcipher_request *req = context;
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	skcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (ivsize && !ecode) {
		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
		       ivsize);
		print_hex_dump_debug("dstiv  @"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
				     edesc->src_nents > 1 ? 100 : ivsize, 1);
	}

	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	kfree(edesc);

	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				  void *context)
{
	struct skcipher_request *req = context;
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	skcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (ivsize && !ecode) {
		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
		       ivsize);

		print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
				     ivsize, 1);
	}

	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	kfree(edesc);

	skcipher_request_complete(req, ecode);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
						    0;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->mapped_dst_nents) {
			dst_dma = 0;
			out_options = 0;
		} else if (edesc->mapped_dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
			out_options = 0;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);
}

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_chachapoly_job(struct aead_request *req,
				struct aead_edesc *edesc, bool all_contig,
				bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int assoclen = req->assoclen;
	u32 *desc = edesc->hw_desc;
	u32 ctx_iv_off = 4;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize != CHACHAPOLY_IV_SIZE) {
		/* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
		ctx_iv_off += 4;

		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		assoclen -= ivsize;
	}

	append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);

	/*
	 * For IPsec load the IV further in the same register.
	 * For RFC7539 simply load the 12 bytes nonce in a single operation
	 */
	append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
			   LDST_SRCDST_BYTE_CONTEXT |
			   ctx_iv_off << LDST_OFFSET_SHIFT);
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	/*
	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
	 * having DPOVRD as destination.
	 */
	if (ctrlpriv->era < 3)
		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
	else
		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in skcipher job descriptor
 */
static void init_skcipher_job(struct skcipher_request *req,
			      struct skcipher_edesc *edesc,
			      const bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc = edesc->hw_desc;
	u32 *sh_desc;
	u32 in_options = 0, out_options = 0;
	dma_addr_t src_dma, dst_dma, ptr;
	int len, sec4_sg_index = 0;

	print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	dev_dbg(jrdev, "asked=%d, cryptlen%d\n",
		(int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);

	caam_dump_sg("src    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (ivsize || edesc->mapped_src_nents > 1) {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
		in_options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
	}

	append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
		out_options = in_options;
	} else if (!ivsize && edesc->mapped_dst_nents == 1) {
		dst_dma = sg_dma_address(req->dst);
	} else {
		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
			  sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}

	append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			return ERR_PTR(dst_nents);
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			return ERR_PTR(src_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* Cover also the case of null (zero length) input data */
		if (src_nents) {
			mapped_src_nents = dma_map_sg(jrdev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(jrdev, "unable to map source\n");
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		/* Cover also the case of null (zero length) output data */
		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(jrdev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(jrdev, "unable to map destination\n");
				dma_unmap_sg(jrdev, req->src, src_nents,
					     DMA_TO_DEVICE);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	/*
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);
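
	/*
	 * Worked example, assuming pad_sg_nents() rounds up to a multiple
	 * of four (per the comment above): 3 source + 5 destination
	 * entries become 3 + pad_sg_nents(5) = 3 + 8 = 11 link-table
	 * entries, so the prefetch never runs past mapped memory.
	 */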
1404
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001405 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1406
1407 /* allocate space for base edesc and hw desc commands, link tables */
1408 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
1409 GFP_DMA | flags);
1410 if (!edesc) {
1411 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
David Brazdil0f672f62019-12-10 10:32:29 +00001412 0, 0, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001413 return ERR_PTR(-ENOMEM);
1414 }
1415
1416 edesc->src_nents = src_nents;
1417 edesc->dst_nents = dst_nents;
David Brazdil0f672f62019-12-10 10:32:29 +00001418 edesc->mapped_src_nents = mapped_src_nents;
1419 edesc->mapped_dst_nents = mapped_dst_nents;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001420 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1421 desc_bytes;
1422 *all_contig_ptr = !(mapped_src_nents > 1);
1423
1424 sec4_sg_index = 0;
1425 if (mapped_src_nents > 1) {
David Brazdil0f672f62019-12-10 10:32:29 +00001426 sg_to_sec4_sg_last(req->src, src_len,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001427 edesc->sec4_sg + sec4_sg_index, 0);
1428 sec4_sg_index += mapped_src_nents;
1429 }
1430 if (mapped_dst_nents > 1) {
David Brazdil0f672f62019-12-10 10:32:29 +00001431 sg_to_sec4_sg_last(req->dst, dst_len,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001432 edesc->sec4_sg + sec4_sg_index, 0);
1433 }
1434
1435 if (!sec4_sg_bytes)
1436 return edesc;
1437
1438 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1439 sec4_sg_bytes, DMA_TO_DEVICE);
1440 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1441 dev_err(jrdev, "unable to map S/G table\n");
1442 aead_unmap(jrdev, edesc, req);
1443 kfree(edesc);
1444 return ERR_PTR(-ENOMEM);
1445 }
1446
1447 edesc->sec4_sg_bytes = sec4_sg_bytes;
1448
1449 return edesc;
1450}
1451
1452static int gcm_encrypt(struct aead_request *req)
1453{
1454 struct aead_edesc *edesc;
1455 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1456 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1457 struct device *jrdev = ctx->jrdev;
1458 bool all_contig;
1459 u32 *desc;
1460 int ret = 0;
1461
1462 /* allocate extended descriptor */
1463 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
1464 if (IS_ERR(edesc))
1465 return PTR_ERR(edesc);
1466
1467 /* Create and submit job descriptor */
1468 init_gcm_job(req, edesc, all_contig, true);
David Brazdil0f672f62019-12-10 10:32:29 +00001469
1470 print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
1471 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1472 desc_bytes(edesc->hw_desc), 1);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001473
1474 desc = edesc->hw_desc;
1475 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1476 if (!ret) {
1477 ret = -EINPROGRESS;
1478 } else {
1479 aead_unmap(jrdev, edesc, req);
1480 kfree(edesc);
1481 }
1482
1483 return ret;
1484}
1485
David Brazdil0f672f62019-12-10 10:32:29 +00001486static int chachapoly_encrypt(struct aead_request *req)
1487{
1488 struct aead_edesc *edesc;
1489 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1490 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1491 struct device *jrdev = ctx->jrdev;
1492 bool all_contig;
1493 u32 *desc;
1494 int ret;
1495
1496 edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
1497 true);
1498 if (IS_ERR(edesc))
1499 return PTR_ERR(edesc);
1500
1501 desc = edesc->hw_desc;
1502
1503 init_chachapoly_job(req, edesc, all_contig, true);
1504 print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
1505 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1506 1);
1507
1508 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1509 if (!ret) {
1510 ret = -EINPROGRESS;
1511 } else {
1512 aead_unmap(jrdev, edesc, req);
1513 kfree(edesc);
1514 }
1515
1516 return ret;
1517}
1518
1519static int chachapoly_decrypt(struct aead_request *req)
1520{
1521 struct aead_edesc *edesc;
1522 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1523 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1524 struct device *jrdev = ctx->jrdev;
1525 bool all_contig;
1526 u32 *desc;
1527 int ret;
1528
1529 edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
1530 false);
1531 if (IS_ERR(edesc))
1532 return PTR_ERR(edesc);
1533
1534 desc = edesc->hw_desc;
1535
1536 init_chachapoly_job(req, edesc, all_contig, false);
1537 print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
1538 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1539 1);
1540
1541 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1542 if (!ret) {
1543 ret = -EINPROGRESS;
1544 } else {
1545 aead_unmap(jrdev, edesc, req);
1546 kfree(edesc);
1547 }
1548
1549 return ret;
1550}
1551
static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	caam_dump_sg("dec src@" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     req->assoclen + req->cryptlen, 1);

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the skcipher extended descriptor
 */
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   int desc_bytes)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (req->dst != req->src) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	if (!ivsize && mapped_src_nents == 1)
		sec4_sg_ents = 0; // no need for an input hw s/g table
	else
		sec4_sg_ents = mapped_src_nents + !!ivsize;
	dst_sg_idx = sec4_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (output S/G)
	 *	pad output S/G, if needed
	 * else if (input S/G) ...
	 *	pad input S/G, if needed
	 */
	if (ivsize || mapped_dst_nents > 1) {
		if (req->src == req->dst)
			sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
		else
			sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
						     !!ivsize);
	} else {
		sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
	}

	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);

	/*
	 * allocate space for base edesc and hw desc commands, link tables, IV
	 */
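	/*
	 * Layout of the single allocation below:
	 *   [struct skcipher_edesc][hw job descriptor][sec4 S/G table][IV]
	 * sec4_sg and the IV copy are carved out of this buffer.
	 */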
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
						  desc_bytes);

	/* Make sure IV is located in a DMAable area */
	if (ivsize) {
		iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(jrdev, iv_dma)) {
			dev_err(jrdev, "unable to map IV\n");
			caam_unmap(jrdev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
	}
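	/* populate the tables: [IV, src] for input, [dst, IV] for output */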
	if (dst_sg_idx)
		sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
			      !!ivsize, 0);

	if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
		sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
			      dst_sg_idx, 0);

	if (ivsize)
		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
				   mapped_dst_nents, iv_dma, ivsize, 0);

	if (ivsize || mapped_dst_nents > 1)
		sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
				    mapped_dst_nents - 1 + !!ivsize);

	if (sec4_sg_bytes) {
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			caam_unmap(jrdev, req->src, req->dst, src_nents,
				   dst_nents, iv_dma, ivsize, 0, 0);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->iv_dma = iv_dma;

	print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     sec4_sg_bytes, 1);

	return edesc;
}

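/*
 * skcipher_encrypt()/skcipher_decrypt() follow the same pattern as the
 * AEAD handlers: allocate an extended descriptor, build a job
 * descriptor that references the per-session shared descriptor,
 * enqueue it on the job ring and return -EINPROGRESS; the completion
 * callback is expected to unmap and free the edesc.
 */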
static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	if (!req->cryptlen)
		return 0;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_skcipher_job(req, edesc, true);

	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	if (!req->cryptlen)
		return 0;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_skcipher_job(req, edesc, false);
	desc = edesc->hw_desc;

	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

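/*
 * skcipher algorithm templates backed by the CAAM hardware.  Each entry
 * pairs the generic crypto API callbacks above with the CAAM class 1
 * algorithm/mode selectors used when the shared descriptors are built
 * at setkey time.
 */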
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ecb(des)",
				.cra_driver_name = "ecb-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ecb(aes)",
				.cra_driver_name = "ecb-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ecb(des3_ede)",
				.cra_driver_name = "ecb-des3-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
	},
};

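/*
 * AEAD templates: the GCM family (rfc4106, rfc4543, plain gcm) comes
 * first, followed by the single-pass authenc(hmac(...), cipher)
 * combinations and the ChaCha20-Poly1305 modes.  Entries with .geniv
 * wrap the base algorithm in an IV generator (echainiv/seqiv), and
 * .nodkp marks algorithms whose keys are used directly, without the
 * split-key (DKP) processing.
 */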
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = GCM_RFC4543_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539(chacha20,poly1305)",
				.cra_driver_name = "rfc7539-chacha20-poly1305-"
						   "caam",
				.cra_blocksize = 1,
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = chachapoly_encrypt,
			.decrypt = chachapoly_decrypt,
			.ivsize = CHACHAPOLY_IV_SIZE,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
					   OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
					   OP_ALG_AAI_AEAD,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc7539esp(chacha20,poly1305)",
				.cra_driver_name = "rfc7539esp-chacha20-"
						   "poly1305-caam",
				.cra_blocksize = 1,
			},
			.setkey = chachapoly_setkey,
			.setauthsize = chachapoly_setauthsize,
			.encrypt = chachapoly_encrypt,
			.decrypt = chachapoly_decrypt,
			.ivsize = 8,
			.maxauthsize = POLY1305_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
					   OP_ALG_AAI_AEAD,
			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
					   OP_ALG_AAI_AEAD,
			.nodkp = true,
		},
	},
};

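/*
 * Common per-transform initialization: allocate a job ring and DMA-map
 * the session context (both shared descriptors plus the key buffer)
 * with a single mapping; the individual DMA addresses are then derived
 * from offsets within struct caam_ctx.
 */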
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

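	/*
	 * On era >= 6 parts the DKP (Derived Key Protocol) command
	 * overwrites the key in the context with the derived split key,
	 * so the mapping must be bidirectional; otherwise the device
	 * only ever reads these buffers.
	 */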
3399 priv = dev_get_drvdata(ctx->jrdev->parent);
3400 if (priv->era >= 6 && uses_dkp)
3401 ctx->dir = DMA_BIDIRECTIONAL;
3402 else
3403 ctx->dir = DMA_TO_DEVICE;
3404
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
					offsetof(struct caam_ctx,
						 sh_desc_enc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_enc_dma = dma_addr;
	ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
						   sh_desc_dec);
	ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	return 0;
}

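/* skcipher transforms never use DKP, hence uses_dkp is always false here. */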
static int caam_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
				false);
}

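/* AEADs derive an HMAC split key via DKP unless the template sets nodkp. */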
static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
			       offsetof(struct caam_ctx, sh_desc_enc_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

void caam_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}
}

static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

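/*
 * caam_algapi_init - called at probe time to detect which accelerator
 * blocks (CHAs) this CAAM instance implements and to register only the
 * algorithm templates the hardware can actually run.
 */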
int caam_algapi_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false, gcm_support;

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
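	/*
	 * Era < 10 parts report CHA versions and instantiation counts via
	 * the perfmon CHA ID registers; Era 10+ parts expose per-CHA version
	 * registers (vreg), which also cover the ChaCha20 (CCHA) and
	 * Poly1305 (PTHA) accelerators that older SECs lack.
	 */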
	if (priv->era < 10) {
		u32 cha_vid, cha_inst, aes_rn;

		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
			   CHA_ID_LS_DES_SHIFT;
		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		ccha_inst = 0;
		ptha_inst = 0;

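		/* GCM is not usable on low-power AES blocks before rev 8. */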
		aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
			 CHA_ID_LS_AES_MASK;
		gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
	} else {
		u32 aesa, mdha;

		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
		mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
		aes_inst = aesa & CHA_VER_NUM_MASK;
		md_inst = mdha & CHA_VER_NUM_MASK;
		ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
		ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;

		gcm_support = aesa & CHA_VER_MISC_AES_GCM;
	}

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES modes not available
		 * on LP devices.
		 */
		if (aes_vid == CHA_VER_VID_AES_LP &&
		    (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
		    OP_ALG_AAI_XTS)
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
			continue;

		/* Skip GCM algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
		    alg_aai == OP_ALG_AAI_GCM && !gcm_support)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (is_mdha(c2_alg_sel) &&
		    (!md_inst || t_alg->aead.maxauthsize > md_limit))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
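/*
 * For reference, a minimal sketch of how a kernel user reaches one of the
 * AEADs registered above through the generic crypto API (error handling
 * trimmed; any template's cra_name works the same way):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_aead(tfm);
 *
 * The CAAM implementation is selected when CAAM_CRA_PRIORITY beats the
 * priority of every other provider of the same cra_name.
 */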