// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;

	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;
	bool needs_inv;
	bool hmac_zlen;
	bool len_is_le;

	int nents;
	dma_addr_t result_dma;

	u32 digest;

	u8 state_sz;    /* expected state size, only set once */
	u8 block_sz;    /* block size, only set once */
	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

	u64 len;
	u64 processed;

	u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
	dma_addr_t cache_dma;
	unsigned int cache_sz;

	u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32));
};

static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
{
	return req->len - req->processed;
}

static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}

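/*
 * Fill in the control words of the first command descriptor. Three cases:
 * the very first block of a hash (restart the engine state), a middle
 * block (import the saved inner digest, do not finalize), or the final
 * block (import the digest and, for a real HMAC finish, the outer digest
 * as well; when the engine has to fake an HMAC/hash continuation, a
 * processed-block counter is appended to the context instead).
 */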
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	u64 count = 0;

	cdesc->control_data.control0 |= ctx->alg;

	/*
	 * Copy the input digest if needed, and set up the context
	 * fields. Do this now as we need it to set up the first command
	 * descriptor.
	 */
	if (!req->processed) {
		/* First - and possibly only - block of basic hash only */
		if (req->finish) {
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_RESTART_HASH  |
				/* ensure it's not 0! */
				CONTEXT_CONTROL_SIZE(1);
		} else {
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_TYPE_HASH_OUT  |
				CONTEXT_CONTROL_RESTART_HASH   |
				CONTEXT_CONTROL_NO_FINISH_HASH |
				/* ensure it's not 0! */
				CONTEXT_CONTROL_SIZE(1);
		}
		return;
	}

	/* Hash continuation or HMAC, setup (inner) digest from state */
	memcpy(ctx->base.ctxr->data, req->state, req->state_sz);

	if (req->finish) {
		/* Compute digest count for hash/HMAC finish operations */
		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
		    req->hmac_zlen || (req->processed != req->block_sz)) {
			count = req->processed / EIP197_COUNTER_BLOCK_SIZE;

			/* This is a hardware limitation, as the
			 * counter must fit into a u32. This represents
			 * a fairly big amount of input data, so we
			 * shouldn't see this.
			 */
			if (unlikely(count & 0xffffffff00000000ULL)) {
				dev_warn(priv->dev,
					 "Input data is too big\n");
				return;
			}
		}

		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
		    /* Special case: zero length HMAC */
		    req->hmac_zlen ||
		    /* PE HW < 4.4 cannot do HMAC continue, fake using hash */
		    (req->processed != req->block_sz)) {
			/* Basic hash continue operation, need digest + cnt */
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
			/* For zero-len HMAC, don't finalize, already padded! */
			if (req->hmac_zlen)
				cdesc->control_data.control0 |=
					CONTEXT_CONTROL_NO_FINISH_HASH;
			cdesc->control_data.control1 |=
				CONTEXT_CONTROL_DIGEST_CNT;
			ctx->base.ctxr->data[req->state_sz >> 2] =
				cpu_to_le32(count);
			req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;

			/* Clear zero-length HMAC flag for next operation! */
			req->hmac_zlen = false;
		} else { /* HMAC */
			/* Need outer digest for HMAC finalization */
			memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
			       ctx->opad, req->state_sz);

			/* Single pass HMAC - no digest count */
			cdesc->control_data.control0 |=
				CONTEXT_CONTROL_SIZE(req->state_sz >> 1) |
				CONTEXT_CONTROL_TYPE_HASH_OUT |
				CONTEXT_CONTROL_DIGEST_HMAC;
		}
	} else { /* Hash continuation, do not finish yet */
		cdesc->control_data.control0 |=
			CONTEXT_CONTROL_SIZE(req->state_sz >> 2) |
			CONTEXT_CONTROL_DIGEST_PRECOMPUTED |
			CONTEXT_CONTROL_TYPE_HASH_OUT |
			CONTEXT_CONTROL_NO_FINISH_HASH;
	}
}

static int safexcel_ahash_enqueue(struct ahash_request *areq);

static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	u64 cache_len;

	*ret = 0;

	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);

	if (sreq->nents) {
		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
		sreq->nents = 0;
	}

	if (sreq->result_dma) {
		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
				 DMA_FROM_DEVICE);
		sreq->result_dma = 0;
	}

	if (sreq->cache_dma) {
		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
		sreq->cache_sz = 0;
	}

	if (sreq->finish) {
		if (sreq->hmac &&
		    (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) {
			/* Faking HMAC using hash - need to do outer hash */
			memcpy(sreq->cache, sreq->state,
			       crypto_ahash_digestsize(ahash));

			memcpy(sreq->state, ctx->opad, sreq->state_sz);

			sreq->len = sreq->block_sz +
				    crypto_ahash_digestsize(ahash);
			sreq->processed = sreq->block_sz;
			sreq->hmac = 0;

			ctx->base.needs_inv = true;
			areq->nbytes = 0;
			safexcel_ahash_enqueue(areq);

			*should_complete = false; /* Not done yet */
			return 1;
		}

		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));
	}

	cache_len = safexcel_queued_len(sreq);
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}

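/*
 * Build the command/result descriptor chain for one hash request: an
 * optional descriptor for data still sitting in the byte cache, one
 * descriptor per source scatterlist entry, and a single result
 * descriptor receiving the (intermediate) digest. Partial trailing
 * blocks of non-final requests are copied into cache_next instead of
 * being sent to the engine.
 */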
254static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
255 int *commands, int *results)
256{
257 struct ahash_request *areq = ahash_request_cast(async);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000258 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
259 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
260 struct safexcel_crypto_priv *priv = ctx->priv;
261 struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
262 struct safexcel_result_desc *rdesc;
263 struct scatterlist *sg;
David Brazdil0f672f62019-12-10 10:32:29 +0000264 int i, extra = 0, n_cdesc = 0, ret = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000265 u64 queued, len, cache_len;
266
267 queued = len = safexcel_queued_len(req);
David Brazdil0f672f62019-12-10 10:32:29 +0000268 if (queued <= HASH_CACHE_SIZE)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000269 cache_len = queued;
270 else
271 cache_len = queued - areq->nbytes;
272
David Brazdil0f672f62019-12-10 10:32:29 +0000273 if (!req->finish && !req->last_req) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000274 /* If this is not the last request and the queued data does not
David Brazdil0f672f62019-12-10 10:32:29 +0000275 * fit into full cache blocks, cache it for the next send call.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000276 */
David Brazdil0f672f62019-12-10 10:32:29 +0000277 extra = queued & (HASH_CACHE_SIZE - 1);
278
279 /* If this is not the last request and the queued data
280 * is a multiple of a block, cache the last one for now.
281 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000282 if (!extra)
David Brazdil0f672f62019-12-10 10:32:29 +0000283 extra = HASH_CACHE_SIZE;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000284
David Brazdil0f672f62019-12-10 10:32:29 +0000285 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
286 req->cache_next, extra,
287 areq->nbytes - extra);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000288
David Brazdil0f672f62019-12-10 10:32:29 +0000289 queued -= extra;
290 len -= extra;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000291
David Brazdil0f672f62019-12-10 10:32:29 +0000292 if (!queued) {
293 *commands = 0;
294 *results = 0;
295 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000296 }
297 }
298
299 /* Add a command descriptor for the cached data, if any */
300 if (cache_len) {
301 req->cache_dma = dma_map_single(priv->dev, req->cache,
302 cache_len, DMA_TO_DEVICE);
303 if (dma_mapping_error(priv->dev, req->cache_dma))
304 return -EINVAL;
305
306 req->cache_sz = cache_len;
307 first_cdesc = safexcel_add_cdesc(priv, ring, 1,
308 (cache_len == len),
309 req->cache_dma, cache_len, len,
310 ctx->base.ctxr_dma);
311 if (IS_ERR(first_cdesc)) {
312 ret = PTR_ERR(first_cdesc);
313 goto unmap_cache;
314 }
315 n_cdesc++;
316
317 queued -= cache_len;
318 if (!queued)
319 goto send_command;
320 }
321
David Brazdil0f672f62019-12-10 10:32:29 +0000322 /* Skip descriptor generation for zero-length requests */
323 if (!areq->nbytes)
324 goto send_command;
325
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000326 /* Now handle the current ahash request buffer(s) */
327 req->nents = dma_map_sg(priv->dev, areq->src,
David Brazdil0f672f62019-12-10 10:32:29 +0000328 sg_nents_for_len(areq->src,
329 areq->nbytes),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000330 DMA_TO_DEVICE);
331 if (!req->nents) {
332 ret = -ENOMEM;
333 goto cdesc_rollback;
334 }
335
336 for_each_sg(areq->src, sg, req->nents, i) {
337 int sglen = sg_dma_len(sg);
338
339 /* Do not overflow the request */
340 if (queued < sglen)
341 sglen = queued;
342
343 cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
David Brazdil0f672f62019-12-10 10:32:29 +0000344 !(queued - sglen),
345 sg_dma_address(sg),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000346 sglen, len, ctx->base.ctxr_dma);
347 if (IS_ERR(cdesc)) {
348 ret = PTR_ERR(cdesc);
349 goto unmap_sg;
350 }
351 n_cdesc++;
352
353 if (n_cdesc == 1)
354 first_cdesc = cdesc;
355
356 queued -= sglen;
357 if (!queued)
358 break;
359 }
360
361send_command:
362 /* Setup the context options */
David Brazdil0f672f62019-12-10 10:32:29 +0000363 safexcel_context_control(ctx, req, first_cdesc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000364
365 /* Add the token */
366 safexcel_hash_token(first_cdesc, len, req->state_sz);
367
368 req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
369 DMA_FROM_DEVICE);
370 if (dma_mapping_error(priv->dev, req->result_dma)) {
371 ret = -EINVAL;
372 goto unmap_sg;
373 }
374
375 /* Add a result descriptor */
376 rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
377 req->state_sz);
378 if (IS_ERR(rdesc)) {
379 ret = PTR_ERR(rdesc);
380 goto unmap_result;
381 }
382
383 safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
384
David Brazdil0f672f62019-12-10 10:32:29 +0000385 req->processed += len;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000386
387 *commands = n_cdesc;
388 *results = 1;
389 return 0;
390
391unmap_result:
392 dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
393 DMA_FROM_DEVICE);
394unmap_sg:
395 dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
396cdesc_rollback:
397 for (i = 0; i < n_cdesc; i++)
398 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
399unmap_cache:
400 if (req->cache_dma) {
401 dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
402 DMA_TO_DEVICE);
David Brazdil0f672f62019-12-10 10:32:29 +0000403 req->cache_dma = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000404 req->cache_sz = 0;
405 }
406
407 return ret;
408}
409
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000410static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
411 int ring,
412 struct crypto_async_request *async,
413 bool *should_complete, int *ret)
414{
415 struct safexcel_result_desc *rdesc;
416 struct ahash_request *areq = ahash_request_cast(async);
417 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
418 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
419 int enq_ret;
420
421 *ret = 0;
422
423 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
424 if (IS_ERR(rdesc)) {
425 dev_err(priv->dev,
426 "hash: invalidate: could not retrieve the result descriptor\n");
427 *ret = PTR_ERR(rdesc);
428 } else {
429 *ret = safexcel_rdesc_check_errors(priv, rdesc);
430 }
431
432 safexcel_complete(priv, ring);
433
434 if (ctx->base.exit_inv) {
435 dma_pool_free(priv->context_pool, ctx->base.ctxr,
436 ctx->base.ctxr_dma);
437
438 *should_complete = true;
439 return 1;
440 }
441
442 ring = safexcel_select_ring(priv);
443 ctx->base.ring = ring;
444
445 spin_lock_bh(&priv->ring[ring].queue_lock);
446 enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
447 spin_unlock_bh(&priv->ring[ring].queue_lock);
448
449 if (enq_ret != -EINPROGRESS)
450 *ret = enq_ret;
451
452 queue_work(priv->ring[ring].workqueue,
453 &priv->ring[ring].work_data.work);
454
455 *should_complete = false;
456
457 return 1;
458}
459
460static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
461 struct crypto_async_request *async,
462 bool *should_complete, int *ret)
463{
464 struct ahash_request *areq = ahash_request_cast(async);
465 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
466 int err;
467
468 BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
469
470 if (req->needs_inv) {
471 req->needs_inv = false;
472 err = safexcel_handle_inv_result(priv, ring, async,
473 should_complete, ret);
474 } else {
475 err = safexcel_handle_req_result(priv, ring, async,
476 should_complete, ret);
477 }
478
479 return err;
480}
481
482static int safexcel_ahash_send_inv(struct crypto_async_request *async,
483 int ring, int *commands, int *results)
484{
485 struct ahash_request *areq = ahash_request_cast(async);
486 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
487 int ret;
488
489 ret = safexcel_invalidate_cache(async, ctx->priv,
490 ctx->base.ctxr_dma, ring);
491 if (unlikely(ret))
492 return ret;
493
494 *commands = 1;
495 *results = 1;
496
497 return 0;
498}
499
500static int safexcel_ahash_send(struct crypto_async_request *async,
501 int ring, int *commands, int *results)
502{
503 struct ahash_request *areq = ahash_request_cast(async);
504 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
505 int ret;
506
507 if (req->needs_inv)
508 ret = safexcel_ahash_send_inv(async, ring, commands, results);
509 else
510 ret = safexcel_ahash_send_req(async, ring, commands, results);
511
512 return ret;
513}
514
515static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
516{
517 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
518 struct safexcel_crypto_priv *priv = ctx->priv;
519 EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
520 struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
521 struct safexcel_inv_result result = {};
522 int ring = ctx->base.ring;
523
David Brazdil0f672f62019-12-10 10:32:29 +0000524 memset(req, 0, EIP197_AHASH_REQ_SIZE);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000525
526 /* create invalidation request */
527 init_completion(&result.completion);
528 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
529 safexcel_inv_complete, &result);
530
531 ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
532 ctx = crypto_tfm_ctx(req->base.tfm);
533 ctx->base.exit_inv = true;
534 rctx->needs_inv = true;
535
536 spin_lock_bh(&priv->ring[ring].queue_lock);
537 crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
538 spin_unlock_bh(&priv->ring[ring].queue_lock);
539
540 queue_work(priv->ring[ring].workqueue,
541 &priv->ring[ring].work_data.work);
542
543 wait_for_completion(&result.completion);
544
545 if (result.error) {
546 dev_warn(priv->dev, "hash: completion error (%d)\n",
547 result.error);
548 return result.error;
549 }
550
551 return 0;
552}
553
/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when there is at least one block size in the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	u64 cache_len;

	/* cache_len: everything accepted by the driver but not sent yet,
	 * tot sz handled by update() - last req sz - tot sz handled by send()
	 */
	cache_len = safexcel_queued_len(req);

	/*
	 * In case there aren't enough bytes to proceed (less than a
	 * block size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return 0;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}

static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
		    req->processed &&
		    (/* invalidate for basic hash continuation finish */
		     (req->finish &&
		      (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
		     /* invalidate if (i)digest changed */
		     memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
		     /* invalidate for HMAC continuation finish */
		     (req->finish && (req->processed != req->block_sz)) ||
		     /* invalidate for HMAC finish with odigest changed */
		     (req->finish &&
		      memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
			     ctx->opad, req->state_sz))))
			/*
			 * We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * logic.
			 */
			ctx->base.needs_inv = true;

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	/* Add request to the cache if it fits */
	ret = safexcel_ahash_cache(areq);

	/* Update total request length */
	req->len += areq->nbytes;

	/* If not all data could fit into the cache, go process the excess.
	 * Also go process immediately for an HMAC IV precompute, which
	 * will never be finished at all, but needs to be processed anyway.
	 */
	if ((ret && !req->finish) || req->last_req)
		return safexcel_ahash_enqueue(areq);

	return 0;
}

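/*
 * Finalize a hash. Two zero-length corner cases get special handling:
 * a plain hash over no data at all returns the precomputed zero-message
 * digest directly, while an HMAC over no data (only key^ipad has been
 * processed so far) gets its inner-hash padding block built in software
 * in the cache so the engine can still finish it in a single pass.
 */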
static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->finish = true;

	if (unlikely(!req->len && !areq->nbytes)) {
		/*
		 * If we have an overall 0 length *hash* request:
		 * The HW cannot do 0 length hash, so we provide the correct
		 * result directly here.
		 */
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
			memcpy(areq->result, md5_zero_message_hash,
			       MD5_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
			memcpy(areq->result, sha384_zero_message_hash,
			       SHA384_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
			memcpy(areq->result, sha512_zero_message_hash,
			       SHA512_DIGEST_SIZE);

		return 0;
	} else if (unlikely(req->hmac &&
			    (req->len == req->block_sz) &&
			    !areq->nbytes)) {
		/*
		 * If we have an overall 0 length *HMAC* request:
		 * For HMAC, we need to finalize the inner digest
		 * and then perform the outer hash.
		 */

		/* generate pad block in the cache */
		/* start with a hash block of all zeroes */
		memset(req->cache, 0, req->block_sz);
		/* set the first byte to 0x80 to 'append a 1 bit' */
		req->cache[0] = 0x80;
		/* add the length in bits in the last 2 bytes */
		if (req->len_is_le) {
			/* Little endian length word (e.g. MD5) */
			req->cache[req->block_sz-8] = (req->block_sz << 3) &
						      255;
			req->cache[req->block_sz-7] = (req->block_sz >> 5);
		} else {
			/* Big endian length word (e.g. any SHA) */
			req->cache[req->block_sz-2] = (req->block_sz >> 5);
			req->cache[req->block_sz-1] = (req->block_sz << 3) &
						      255;
		}

		req->len += req->block_sz; /* plus 1 hash block */

		/* Set special zero-length HMAC flag */
		req->hmac_zlen = true;

		/* Finalize HMAC */
		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	} else if (req->hmac) {
		/* Finalize HMAC */
		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	}

	return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->finish = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	export->digest = req->digest;

	memcpy(export->state, req->state, req->state_sz);
	memcpy(export->cache, req->cache, HASH_CACHE_SIZE);

	return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	req->digest = export->digest;

	memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;
	req->block_sz = SHA1_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->flags & EIP197_TRC_CACHE) {
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA1,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA1_BLOCK_SIZE;
	req->processed = SHA1_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;
	req->block_sz = SHA1_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
	struct completion completion;
	int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}

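/*
 * Precompute the HMAC inner and outer midstates: derive key^ipad and
 * key^opad (digesting the key first if it is longer than a block), run
 * each pad block through the requested ahash transform and export the
 * resulting partial state for the engine to resume from.
 */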
int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
			 void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen, const char *alg,
				    unsigned int state_sz)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_ahash_export_state istate, ostate;
	int ret;

	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr &&
	    (memcmp(ctx->ipad, istate.state, state_sz) ||
	     memcmp(ctx->opad, ostate.state, state_sz)))
		ctx->base.needs_inv = true;

	memcpy(ctx->ipad, &istate.state, state_sz);
	memcpy(ctx->opad, &ostate.state, state_sz);

	return 0;
}

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
					SHA1_DIGEST_SIZE);
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA1,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA256_BLOCK_SIZE;
	req->processed = SHA256_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_hmac_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha224_digest,
		.setkey = safexcel_hmac_sha224_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "safexcel-hmac-sha224",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA256_BLOCK_SIZE;
	req->processed = SHA256_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;
	req->block_sz = SHA256_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_256,
	.alg.ahash = {
		.init = safexcel_hmac_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha256_digest,
		.setkey = safexcel_hmac_sha256_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "safexcel-hmac-sha256",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha512_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha512_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha512_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha512_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha512",
				.cra_driver_name = "safexcel-sha512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha384_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha384_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha384_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_sha384_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha384_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha384",
				.cra_driver_name = "safexcel-sha384",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
					SHA512_DIGEST_SIZE);
}

static int safexcel_hmac_sha512_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA512_BLOCK_SIZE;
	req->processed = SHA512_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha512_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_hmac_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha512_digest,
		.setkey = safexcel_hmac_sha512_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "safexcel-hmac-sha512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
					SHA512_DIGEST_SIZE);
}

static int safexcel_hmac_sha384_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = SHA512_BLOCK_SIZE;
	req->processed = SHA512_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha384_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_hmac_sha384_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha384_digest,
		.setkey = safexcel_hmac_sha384_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "safexcel-hmac-sha384",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_md5_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = MD5_DIGEST_SIZE;
	req->block_sz = MD5_HMAC_BLOCK_SIZE;

	return 0;
}

static int safexcel_md5_digest(struct ahash_request *areq)
{
	int ret = safexcel_md5_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_MD5,
	.alg.ahash = {
		.init = safexcel_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_md5_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "safexcel-md5",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_md5_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len = MD5_HMAC_BLOCK_SIZE;
	req->processed = MD5_HMAC_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = MD5_DIGEST_SIZE;
	req->block_sz = MD5_HMAC_BLOCK_SIZE;
	req->len_is_le = true; /* MD5 is little endian! ... */
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
					MD5_DIGEST_SIZE);
}

static int safexcel_hmac_md5_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_md5_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_MD5,
	.alg.ahash = {
		.init = safexcel_hmac_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_md5_digest,
		.setkey = safexcel_hmac_md5_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "safexcel-hmac-md5",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};