// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keys, in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * The datasheet can be found in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"

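/*
 * Optimized polling path: moves data through the SS FIFOs in whole 32-bit
 * words. It can only be used when every source and destination SG entry has
 * a 4-byte-aligned offset and length, so no intermediate buffering is needed
 * (see sun4i_ss_cipher_poll() below for the generic case).
 */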
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offsets for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);

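	/* load the cipher key into the SS_KEY registers, one 32-bit word each */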
	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
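	/*
	 * The mode word carries SS_ENABLED plus the algorithm, direction and
	 * keymode; writing it to SS_CTL starts the engine.
	 */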
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
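
	/*
	 * Main polling loop: feed up to rx_cnt words of input into the RX
	 * FIFO, refresh the FIFO counters from SS_FCSR, then drain up to
	 * tx_cnt words of output from the TX FIFO. pi/po record how many
	 * bytes of src/dst are already done so the SG mapping iterators can
	 * be restarted at the right position on every iteration.
	 */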
	do {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			todo = min(rx_cnt, ileft);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo) {
				ileft -= todo;
				writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
				oi += todo * 4;
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

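		/* SS_FCSR reports free RX FIFO slots and readable TX FIFO words */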
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			oo = 0;
			po += mo.length;
		}
		sg_miter_stop(&mo);
	} while (oleft);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
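/*
 * Software fallback: hand the whole request over to the sync skcipher
 * allocated in sun4i_ss_cipher_init(), for requests the hardware cannot
 * process (a cryptlen that is not a multiple of the block size).
 */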
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
	int err;

	skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL,
				      NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(subreq);
	else
		err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}

/* Generic function that supports SGs whose sizes are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	unsigned int oi, oo; /* offsets for in and out */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo */
	unsigned int obl = 0; /* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

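	/* requests with a cryptlen that is not block-aligned go to the fallback */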
	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;

	/*
	 * If all the SGs have lengths and offsets that are a multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length | in_sg->offset) & 3u)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length | out_sg->offset) & 3u)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

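	/*
	 * Buffered variant of the loop in sun4i_ss_opti_poll(): input bytes
	 * that are not 4-byte aligned are linearized into ss->buf before
	 * being written to the RX FIFO, and output words are staged in
	 * ss->bufo when the destination SG cannot take whole words.
	 */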
	while (oleft) {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG.
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so we need to
				 * linearize them in buf; here todo is in bytes.
				 * After that copy, once we have a multiple of
				 * 4 bytes, we must be able to write all of buf
				 * in one pass, which is why we min() with
				 * rx_cnt.
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(ss->buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, ss->buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		if (!tx_cnt)
			continue;
		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		/* todo is in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);

		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				po += mo.length;
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo, reading as much as
			 * possible so the device is emptied.
			 */
			readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy? No more than
				 * the remaining SG length and no more than
				 * the remaining buffered data; no need to
				 * test against oleft.
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, ss->bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					po += mo.length;
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
		sg_miter_stop(&mo);
	}
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}

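/*
 * The mode-specific entry points below only build the SS control word
 * (algorithm, chaining mode, direction and keymode) in the request context
 * and then delegate the transfer to sun4i_ss_cipher_poll().
 */
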
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

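/*
 * Per-tfm setup: reserve room for the request context and allocate the
 * software fallback cipher, freed again in sun4i_ss_cipher_exit().
 */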
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx));

	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	return 0;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(op->fallback_tfm);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

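	/* keep the fallback tfm in sync: mirror the request flags and the key */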
	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}