// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};

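/* Version 3 devices tag each command with a freshly generated job id;
 * later devices simply use 0 here.
 */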
#define CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

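/* Wrap a scatterlist in a workarea: count the entries covering @len
 * bytes and, unless the direction is DMA_NONE or the length is zero,
 * map them for DMA.
 */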
static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_sg_head = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

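/* Account for @len bytes having been consumed and advance both the
 * DMA-mapped and the original scatterlist positions accordingly.
 */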
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);
	unsigned int sg_combined_len = 0;

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
		/* Advance to the next DMA scatterlist entry */
		wa->dma_sg = sg_next(wa->dma_sg);

		/* In the case that the DMA mapped scatterlist has entries
		 * that have been merged, the non-DMA mapped scatterlist
		 * must be advanced multiple times for each merged entry.
		 * This ensures that the current non-DMA mapped entry
		 * corresponds to the current DMA mapped entry.
		 */
		do {
			sg_combined_len += wa->sg->length;
			wa->sg = sg_next(wa->sg);
		} while (wa->sg_used > sg_combined_len);

		wa->sg_used = 0;
	}
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

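/* Allocate a DMA-able bounce buffer of @len bytes: small requests come
 * from the queue's DMA pool, larger ones from kzalloc() plus a
 * streaming DMA mapping.
 */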
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL,
					      &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (dma_mapping_error(wa->dev, wa->dma.address))
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}

static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			   struct scatterlist *sg, unsigned int sg_offset,
			   unsigned int len)
{
	WARN_ON(!wa->address);

	if (len > (wa->length - wa_offset))
		return -EINVAL;

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
	return 0;
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

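/* The "reverse" variants below additionally reverse the byte order of
 * the buffer contents (an in-place XOR swap from both ends), converting
 * between big-endian and little-endian representations.
 */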
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;
	int rc;

	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
	if (rc)
		return rc;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

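/* Copy data between the scatterlist and the bounce buffer: with
 * @from == 0 the buffer is zeroed and filled from the scatterlist,
 * with @from == 1 the buffer contents are written back out to the
 * scatterlist.  Returns the number of bytes accounted for.
 */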
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

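/* Move a workarea between system memory and a local storage block (SB)
 * slot using a passthru operation, optionally byte-swapping on the way.
 */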
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

static noinline_for_stack int
ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					      aes->cmac_key_len);
			if (ret)
				goto e_src;
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

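/* AES-GCM: GHASH the AAD, run GCTR over the payload, then GHASH the
 * length block and either append the resulting tag (encrypt) or compare
 * it against the supplied tag (decrypt).
 */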
static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;

	unsigned long long *final;
	unsigned int dm_offset;
	unsigned int authsize;
	unsigned int jobid;
	unsigned int ilen;
	bool in_place = true; /* Default value */
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!aes->iv)
		return -EINVAL;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

	/* Zero defaults to 16 bytes, the maximum size */
	authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
	switch (authsize) {
	case 16:
	case 15:
	case 14:
	case 13:
	case 12:
	case 8:
	case 4:
		break;
	default:
		return -EINVAL;
	}

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - authsize;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	jobid = CCP_NEW_JOBID(cmd_q->ccp);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	op.init = 1;
	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
			op.init = 0;
		}
	}

	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	if (ilen > 0) {
		/* Step 2: Run a GCTR over the plaintext */
		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
				    AES_BLOCK_SIZE,
				    in_place ? DMA_BIDIRECTIONAL
					     : DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		if (in_place) {
			dst = src;
		} else {
			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
			if (ret)
				goto e_src;
		}

		op.soc = 0;
		op.eom = 0;
		op.init = 1;
		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
			if (!src.sg_wa.bytes_left) {
				unsigned int nbytes = ilen % AES_BLOCK_SIZE;

				if (nbytes) {
					op.eom = 1;
					op.u.aes.size = (nbytes * 8) - 1;
				}
			}

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_dst;
			}

			ccp_process_data(&src, &dst, &op);
			op.init = 0;
		}
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_dst;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	final = (unsigned long long *) final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;
	op.u.aes.size = 0;
	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_dst;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_tag;
		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
		if (ret)
			goto e_tag;

		ret = crypto_memneq(tag.address, final_wa.address,
				    authsize) ? -EBADMSG : 0;
		ccp_dm_free(&tag);
	}

e_tag:
	ccp_dm_free(&final_wa);

e_dst:
	if (ilen > 0 && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (ilen > 0)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

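/* Generic AES (ECB/CBC/CFB/CTR): load the key and, except for ECB, the
 * IV into the LSB, stream the data through the engine, then read the
 * updated IV back out for the non-ECB modes.
 */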
static noinline_for_stack int
ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		if (ret)
			goto e_ctx;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

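/* AES-XTS: the key layout in the LSB differs between version 3 and
 * version 5 devices, and the tweak (IV) is already little endian, so it
 * is loaded without a byte swap.
 */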
static noinline_for_stack int
ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else if (xts->key_len == AES_KEYSIZE_256)
		aestype = CCP_AES_TYPE_256;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
				      xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

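/* 3DES (ECB/CBC), available on version 5 and later devices; the three
 * key parts are loaded into the LSB in reverse order of how they are
 * supplied.
 */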
static noinline_for_stack int
ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
		return -EINVAL;

	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	ret = -EIO;
	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			      des3->key, 0, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
			      des3->key, len_singlekey, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset,
			      des3->key, 2 * len_singlekey, len_singlekey);
	if (ret)
		goto e_key;

	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
				      des3->iv_len);
		if (ret)
			goto e_ctx;

		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place)
		dst = src;
	else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}
e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

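/* SHA-1/224/256/384/512: seed or restore the context in the LSB, hash
 * the data, then read the context back.  When an opad is supplied the
 * outer HMAC hash is computed by recursing once with a fresh command.
 */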
static noinline_for_stack int
ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address,
			       LSB_ITEM_SIZE);
			break;
		default:
			kfree(hmac_buf);
			ret = -EINVAL;
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}

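/* RSA: the exponent, modulus and message are converted to little
 * endian, with the modulus and message concatenated into a single input
 * buffer sized to a multiple of 256 bits.
 */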
David Brazdil0f672f62019-12-10 10:32:29 +00001830static noinline_for_stack int
1831ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001832{
1833 struct ccp_rsa_engine *rsa = &cmd->u.rsa;
1834 struct ccp_dm_workarea exp, src, dst;
1835 struct ccp_op op;
1836 unsigned int sb_count, i_len, o_len;
1837 int ret;
1838
1839 /* Check against the maximum allowable size, in bits */
1840 if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
1841 return -EINVAL;
1842
1843 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
1844 return -EINVAL;
1845
1846 memset(&op, 0, sizeof(op));
1847 op.cmd_q = cmd_q;
1848 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1849
1850 /* The RSA modulus must precede the message being acted upon, so
1851 * it must be copied to a DMA area where the message and the
1852 * modulus can be concatenated. Therefore the input buffer
1853 * length required is twice the output buffer length (which
1854 * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
1855 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
1856 * required.
1857 */
1858 o_len = 32 * ((rsa->key_size + 255) / 256);
1859 i_len = o_len * 2;
1860
1861 sb_count = 0;
1862 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
1863 /* sb_count is the number of storage block slots required
1864 * for the modulus.
1865 */
1866 sb_count = o_len / CCP_SB_BYTES;
1867 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
1868 sb_count);
1869 if (!op.sb_key)
1870 return -EIO;
1871 } else {
1872 /* A version 5 device allows a modulus size that will not fit
1873 * in the LSB, so the command will transfer it from memory.
1874 * Set the sb key to the default, even though it's not used.
1875 */
1876 op.sb_key = cmd_q->sb_key;
1877 }
1878
1879 /* The RSA exponent must be in little endian format. Reverse its
1880 * byte order.
1881 */
1882 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
1883 if (ret)
1884 goto e_sb;
1885
1886 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
1887 if (ret)
1888 goto e_exp;
1889
1890 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
1891 /* Copy the exponent to the local storage block, using
1892 * as many 32-byte blocks as were allocated above. It's
1893 * already little endian, so no further change is required.
1894 */
1895 ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
1896 CCP_PASSTHRU_BYTESWAP_NOOP);
1897 if (ret) {
1898 cmd->engine_error = cmd_q->cmd_error;
1899 goto e_exp;
1900 }
1901 } else {
1902 /* The exponent can be retrieved from memory via DMA. */
1903 op.exp.u.dma.address = exp.dma.address;
1904 op.exp.u.dma.offset = 0;
1905 }

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	if (sb_count)
		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}
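
/*
 * Illustrative caller sketch, not part of the original driver: it only
 * mirrors the ccp_rsa_engine fields consumed above, and every value here
 * is an assumption made for the example.
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_RSA;
 *	cmd.u.rsa.key_size = 2048;		// in bits, <= vdata->rsamax
 *	cmd.u.rsa.exp = exp_sg;			// big-endian exponent
 *	cmd.u.rsa.exp_len = exp_len;
 *	cmd.u.rsa.mod = mod_sg;			// big-endian modulus
 *	cmd.u.rsa.mod_len = mod_len;
 *	cmd.u.rsa.src = src_sg;			// big-endian message
 *	cmd.u.rsa.src_len = src_len;
 *	cmd.u.rsa.dst = dst_sg;			// receives mod_len result bytes
 *	ret = ccp_run_cmd(cmd_q, &cmd);
 */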

static noinline_for_stack int
ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		if (ret)
			goto e_mask;
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine.
	 * Because the CCP engine works on a single source and destination
	 * DMA address at a time, the length of each entry in the source
	 * scatterlist (after the dma_map_sg call) must be less than or
	 * equal to the remaining length in the current destination
	 * scatterlist entry, and that length must be a multiple of
	 * CCP_PASSTHRU_BLOCKSIZE.
	 */
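	/* Illustrative only: with two 256-byte mapped source entries and a
	 * single 512-byte destination entry, the first pass-through writes
	 * at destination offset 0 and the second at offset 256 (tracked in
	 * dst.sg_wa.sg_used below); the destination entry is only advanced
	 * once it has been completely filled.
	 */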
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
		if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

static noinline_for_stack int
ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
			   struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
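	/* Illustrative layout only, inferred from the copies below: the
	 * source area is filled as consecutive CCP_ECC_OPERAND_SIZE slots,
	 * [modulus][operand_1][operand_2], with each value byte-reversed
	 * into little endian form (operand_2 is omitted for the MINV
	 * function).
	 */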
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
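	/* Illustrative layout only, inferred from the copies below: the
	 * source area is a sequence of CCP_ECC_OPERAND_SIZE slots holding
	 * [modulus][x1][y1][z1 = 1], followed by [x2][y2][z2 = 1] for
	 * point addition or by [domain a] (plus [scalar] for point
	 * multiplication) otherwise, each value byte-reversed into
	 * little endian form.
	 */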
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static noinline_for_stack int
ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		switch (cmd->u.aes.mode) {
		case CCP_AES_MODE_CMAC:
			ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
			break;
		case CCP_AES_MODE_GCM:
			ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
			break;
		default:
			ret = ccp_run_aes_cmd(cmd_q, cmd);
			break;
		}
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
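
/*
 * Illustrative dispatch sketch, not part of the original driver: a
 * pre-mapped pass-through copy routed through ccp_run_cmd(). Every value
 * below is an assumption made for the example; only the field and flag
 * names used by the code above are relied on.
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_PASSTHRU;
 *	cmd.flags = CCP_CMD_PASSTHRU_NO_DMA_MAP;
 *	cmd.u.passthru_nomap.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
 *	cmd.u.passthru_nomap.final = 1;
 *	cmd.u.passthru_nomap.src_dma = src_dma_addr;	// already DMA mapped
 *	cmd.u.passthru_nomap.dst_dma = dst_dma_addr;	// already DMA mapped
 *	cmd.u.passthru_nomap.src_len = len;
 *	ret = ccp_run_cmd(cmd_q, &cmd);
 */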