// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sm3.h>
#include <crypto/internal/hash.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
#define CC_SM3_HASH_LEN_SIZE 8

struct cc_hash_handle {
	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
	cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
};

static const u32 cc_digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 cc_md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 cc_sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
static u64 cc_sha384_init[] = {
	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
static u64 cc_sha512_init[] = {
	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
static const u32 cc_sm3_init[] = {
	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };

static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static const void *cc_larval_digest(struct device *dev, u32 mode);

struct cc_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
	u8 *key;
};

/* hash per-session context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/* holds the original digest: the digest after "setkey" for HMAC,
	 * the initial digest for plain HASH.
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	unsigned int hash_len;
	struct completion setkey_comp;
	bool is_hmac;
};

static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
			unsigned int flow_mode, struct cc_hw_desc desc[],
			bool is_not_last_data, unsigned int *seq_size);

static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
	    mode == DRV_HASH_SHA512) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}

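/* DMA-map the intermediate digest result buffer that the HW writes the final
 * digest into; it is copied out to req->result when the buffer is unmapped.
 */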
static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
			 unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}

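/* Reset the per-request state and seed it from the session context: the
 * IPAD/OPAD digests and initial byte counts for HMAC, the larval digest for
 * a plain hash.
 */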
static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       cc_digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len,
				       cc_digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /* hash */
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}

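/* DMA-map the per-request digest, digest-byte-count and opad buffers that the
 * descriptor sequences below read from and write back to.
 */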
static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
		      struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	state->digest_buff_dma_addr =
		dma_map_single(dev, state->digest_buff,
			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		return -EINVAL;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, state->digest_bytes_len,
				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
			goto unmap_digest_buf;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto unmap_digest_len;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	}

	return 0;

unmap_digest_len:
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
unmap_digest_buf:
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}

	return -EINVAL;
}

static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}

static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
			    unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr) {
		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}

static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

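/* Append the descriptor that flushes the final digest out of the hash engine
 * into the DMA-mapped result buffer, with the endianness the API expects.
 */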
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}

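/* Append the descriptors for the outer HMAC pass: store the inner digest,
 * reload the OPAD-xored key state and digest length, and hash the inner
 * digest again.
 */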
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}

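/* Handle a one-shot .digest request: map the state and data, build the full
 * descriptor sequence for a plain hash or for HMAC (inner plus outer pass)
 * and queue it to the HW.
 */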
static int cc_hash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	cc_sram_addr_t larval_digest_addr =
		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
	int idx = 0;
	int rc = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	/* If HMAC then load hash IPAD xor key, if HASH then load initial
	 * digest
	 */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr,
			     ctx->hash_len, NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, ctx->hash_len);
		if (nbytes)
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      ctx->hash_len, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		idx = cc_fin_hmac(desc, req, idx);
	}

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

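/* Reload the intermediate digest and running byte count saved by a previous
 * update so the HW continues from the stored state, then queue the data
 * descriptors.
 */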
static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}

static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size %x does not require HW update\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

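/* Common handler for .final and .finup: restore the saved state, hash any
 * remaining data, let the HW pad the last block and write out the final
 * digest (running the outer HMAC pass when needed).
 */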
static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}

static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}

static int cc_hash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	return 0;
}

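/* HMAC setkey: hash keys longer than the block size (or zero-pad shorter
 * ones), then derive the IPAD/OPAD intermediate digests on the HW and cache
 * them in the session context.
 */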
static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	cc_sram_addr_t larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);

	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* The keylen value distinguishes HASH in case keylen is ZERO bytes,
	 * any NON-ZERO value utilizes HMAC flow
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;
	ctx->key_params.key = NULL;

	if (keylen) {
		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
		if (!ctx->key_params.key)
			return -ENOMEM;

		ctx->key_params.key_dma_addr =
			dma_map_single(dev, (void *)ctx->key_params.key, keylen,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				ctx->key_params.key, keylen);
			kzfree(ctx->key_params.key);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length*/
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			cc_set_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr +
				       digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen)) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length*/
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
		 * of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

out:
	if (rc)
		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}

	kzfree(ctx->key_params.key);

	return rc;
}

static int cc_xcbc_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = 0;
	unsigned int idx = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key_params.key)
		return -ENOMEM;

	ctx->key_params.key_dma_addr =
		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		kzfree(ctx->key_params.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

	if (rc)
		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	kzfree(ctx->key_params.key);

	return rc;
}

static int cc_cmac_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
	if (keylen == 24) {
		memset(ctx->opad_tmp_keys_buff + 24, 0,
		       CC_AES_KEY_SIZE_MAX - 24);
	}

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	ctx->key_params.keylen = keylen;

	return 0;
}

static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}

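/* DMA-map the digest and opad/temporary-key buffers embedded in the per-tfm
 * context; undone by cc_free_ctx() on failure or teardown.
 */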
static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr =
		dma_map_single(dev, (void *)ctx->digest_buff,
			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr =
		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
			       sizeof(ctx->opad_tmp_keys_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;
	return 0;

fail:
	cc_free_ctx(ctx);
	return -ENOMEM;
}

static int cc_get_hash_len(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->hash_mode == DRV_HASH_SM3)
		return CC_SM3_HASH_LEN_SIZE;
	else
		return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_cra_init(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct cc_hash_alg *cc_alg =
		container_of(ahash_alg, struct cc_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_req_ctx));

	ctx->hash_mode = cc_alg->hash_mode;
	ctx->hw_mode = cc_alg->hw_mode;
	ctx->inter_digestsize = cc_alg->inter_digestsize;
	ctx->drvdata = cc_alg->drvdata;
	ctx->hash_len = cc_get_hash_len(tfm);
	return cc_alloc_ctx(ctx);
}

static void cc_cra_exit(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "cc_cra_exit");
	cc_free_ctx(ctx);
}

static int cc_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int rc;
	u32 idx = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
					req->nbytes, block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size %x does not require HW update\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_update_complete;
	cc_req.user_arg = (void *)req;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

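/* Finalize an AES XCBC-MAC/CMAC request; zero-length messages and messages
 * that end exactly on a block boundary each get a dedicated descriptor
 * sequence.
 */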
static int cc_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	u32 rem_cnt = *cc_hash_buf_cnt(state);

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 0, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_hash_complete;
	cc_req.user_arg = (void *)req;

	if (state->xcbc_count && rem_cnt == 0) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
			     key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous
		 * block_state-XOR-M[n]
		 */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
		return cc_mac_final(req);
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_hash_complete;
	cc_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	/* TODO */
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 key_len;
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_digest_complete;
	cc_req.user_arg = (void *)req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

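/* Export the running hash state (magic marker, intermediate digest, byte
 * count and buffered partial block) in the layout consumed by
 * cc_hash_import().
 */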
1500static int cc_hash_export(struct ahash_request *req, void *out)
1501{
1502 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1503 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1504 struct ahash_req_ctx *state = ahash_request_ctx(req);
1505 u8 *curr_buff = cc_hash_buf(state);
1506 u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1507 const u32 tmp = CC_EXPORT_MAGIC;
1508
1509 memcpy(out, &tmp, sizeof(u32));
1510 out += sizeof(u32);
1511
1512 memcpy(out, state->digest_buff, ctx->inter_digestsize);
1513 out += ctx->inter_digestsize;
1514
David Brazdil0f672f62019-12-10 10:32:29 +00001515 memcpy(out, state->digest_bytes_len, ctx->hash_len);
1516 out += ctx->hash_len;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001517
1518 memcpy(out, &curr_buff_cnt, sizeof(u32));
1519 out += sizeof(u32);
1520
1521 memcpy(out, curr_buff, curr_buff_cnt);
1522
1523 return 0;
1524}
1525
1526static int cc_hash_import(struct ahash_request *req, const void *in)
1527{
1528 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1529 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1530 struct device *dev = drvdata_to_dev(ctx->drvdata);
1531 struct ahash_req_ctx *state = ahash_request_ctx(req);
1532 u32 tmp;
1533
1534 memcpy(&tmp, in, sizeof(u32));
1535 if (tmp != CC_EXPORT_MAGIC)
1536 return -EINVAL;
1537 in += sizeof(u32);
1538
1539 cc_init_req(dev, state, ctx);
1540
1541 memcpy(state->digest_buff, in, ctx->inter_digestsize);
1542 in += ctx->inter_digestsize;
1543
David Brazdil0f672f62019-12-10 10:32:29 +00001544 memcpy(state->digest_bytes_len, in, ctx->hash_len);
1545 in += ctx->hash_len;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001546
1547 /* Sanity check the data as much as possible */
1548 memcpy(&tmp, in, sizeof(u32));
1549 if (tmp > CC_MAX_HASH_BLCK_SIZE)
1550 return -EINVAL;
1551 in += sizeof(u32);
1552
1553 state->buf_cnt[0] = tmp;
1554 memcpy(state->buffers[0], in, tmp);
1555
1556 return 0;
1557}
1558
1559struct cc_hash_template {
1560 char name[CRYPTO_MAX_ALG_NAME];
1561 char driver_name[CRYPTO_MAX_ALG_NAME];
1562 char mac_name[CRYPTO_MAX_ALG_NAME];
1563 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1564 unsigned int blocksize;
David Brazdil0f672f62019-12-10 10:32:29 +00001565 bool is_mac;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001566 bool synchronize;
1567 struct ahash_alg template_ahash;
1568 int hash_mode;
1569 int hw_mode;
1570 int inter_digestsize;
1571 struct cc_drvdata *drvdata;
1572 u32 min_hw_rev;
David Brazdil0f672f62019-12-10 10:32:29 +00001573 enum cc_std_body std_body;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001574};
1575
1576#define CC_STATE_SIZE(_x) \
1577 ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1578
1579/* hash descriptors */
1580static struct cc_hash_template driver_hash[] = {
1581 //Asynchronize hash template
1582 {
1583 .name = "sha1",
1584 .driver_name = "sha1-ccree",
1585 .mac_name = "hmac(sha1)",
1586 .mac_driver_name = "hmac-sha1-ccree",
1587 .blocksize = SHA1_BLOCK_SIZE,
David Brazdil0f672f62019-12-10 10:32:29 +00001588 .is_mac = true,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001589 .synchronize = false,
1590 .template_ahash = {
1591 .init = cc_hash_init,
1592 .update = cc_hash_update,
1593 .final = cc_hash_final,
1594 .finup = cc_hash_finup,
1595 .digest = cc_hash_digest,
1596 .export = cc_hash_export,
1597 .import = cc_hash_import,
1598 .setkey = cc_hash_setkey,
1599 .halg = {
1600 .digestsize = SHA1_DIGEST_SIZE,
1601 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1602 },
1603 },
1604 .hash_mode = DRV_HASH_SHA1,
1605 .hw_mode = DRV_HASH_HW_SHA1,
1606 .inter_digestsize = SHA1_DIGEST_SIZE,
1607 .min_hw_rev = CC_HW_REV_630,
1608		.std_body = CC_STD_NIST,
1609	},
1610 {
1611 .name = "sha256",
1612 .driver_name = "sha256-ccree",
1613 .mac_name = "hmac(sha256)",
1614 .mac_driver_name = "hmac-sha256-ccree",
1615 .blocksize = SHA256_BLOCK_SIZE,
1616		.is_mac = true,
1617		.template_ahash = {
1618 .init = cc_hash_init,
1619 .update = cc_hash_update,
1620 .final = cc_hash_final,
1621 .finup = cc_hash_finup,
1622 .digest = cc_hash_digest,
1623 .export = cc_hash_export,
1624 .import = cc_hash_import,
1625 .setkey = cc_hash_setkey,
1626 .halg = {
1627 .digestsize = SHA256_DIGEST_SIZE,
1628 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1629 },
1630 },
1631 .hash_mode = DRV_HASH_SHA256,
1632 .hw_mode = DRV_HASH_HW_SHA256,
1633 .inter_digestsize = SHA256_DIGEST_SIZE,
1634 .min_hw_rev = CC_HW_REV_630,
1635		.std_body = CC_STD_NIST,
1636	},
1637 {
1638 .name = "sha224",
1639 .driver_name = "sha224-ccree",
1640 .mac_name = "hmac(sha224)",
1641 .mac_driver_name = "hmac-sha224-ccree",
1642 .blocksize = SHA224_BLOCK_SIZE,
1643		.is_mac = true,
1644		.template_ahash = {
1645 .init = cc_hash_init,
1646 .update = cc_hash_update,
1647 .final = cc_hash_final,
1648 .finup = cc_hash_finup,
1649 .digest = cc_hash_digest,
1650 .export = cc_hash_export,
1651 .import = cc_hash_import,
1652 .setkey = cc_hash_setkey,
1653 .halg = {
1654 .digestsize = SHA224_DIGEST_SIZE,
1655				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1656			},
1657 },
1658 .hash_mode = DRV_HASH_SHA224,
1659 .hw_mode = DRV_HASH_HW_SHA256,
1660 .inter_digestsize = SHA256_DIGEST_SIZE,
1661 .min_hw_rev = CC_HW_REV_630,
1662		.std_body = CC_STD_NIST,
1663	},
1664 {
1665 .name = "sha384",
1666 .driver_name = "sha384-ccree",
1667 .mac_name = "hmac(sha384)",
1668 .mac_driver_name = "hmac-sha384-ccree",
1669 .blocksize = SHA384_BLOCK_SIZE,
1670		.is_mac = true,
1671		.template_ahash = {
1672 .init = cc_hash_init,
1673 .update = cc_hash_update,
1674 .final = cc_hash_final,
1675 .finup = cc_hash_finup,
1676 .digest = cc_hash_digest,
1677 .export = cc_hash_export,
1678 .import = cc_hash_import,
1679 .setkey = cc_hash_setkey,
1680 .halg = {
1681 .digestsize = SHA384_DIGEST_SIZE,
1682				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1683			},
1684 },
1685 .hash_mode = DRV_HASH_SHA384,
1686 .hw_mode = DRV_HASH_HW_SHA512,
1687 .inter_digestsize = SHA512_DIGEST_SIZE,
1688 .min_hw_rev = CC_HW_REV_712,
1689		.std_body = CC_STD_NIST,
1690	},
1691 {
1692 .name = "sha512",
1693 .driver_name = "sha512-ccree",
1694 .mac_name = "hmac(sha512)",
1695 .mac_driver_name = "hmac-sha512-ccree",
1696 .blocksize = SHA512_BLOCK_SIZE,
1697		.is_mac = true,
1698		.template_ahash = {
1699 .init = cc_hash_init,
1700 .update = cc_hash_update,
1701 .final = cc_hash_final,
1702 .finup = cc_hash_finup,
1703 .digest = cc_hash_digest,
1704 .export = cc_hash_export,
1705 .import = cc_hash_import,
1706 .setkey = cc_hash_setkey,
1707 .halg = {
1708 .digestsize = SHA512_DIGEST_SIZE,
1709 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1710 },
1711 },
1712 .hash_mode = DRV_HASH_SHA512,
1713 .hw_mode = DRV_HASH_HW_SHA512,
1714 .inter_digestsize = SHA512_DIGEST_SIZE,
1715 .min_hw_rev = CC_HW_REV_712,
1716		.std_body = CC_STD_NIST,
1717	},
1718 {
1719 .name = "md5",
1720 .driver_name = "md5-ccree",
1721 .mac_name = "hmac(md5)",
1722 .mac_driver_name = "hmac-md5-ccree",
1723 .blocksize = MD5_HMAC_BLOCK_SIZE,
1724		.is_mac = true,
1725		.template_ahash = {
1726 .init = cc_hash_init,
1727 .update = cc_hash_update,
1728 .final = cc_hash_final,
1729 .finup = cc_hash_finup,
1730 .digest = cc_hash_digest,
1731 .export = cc_hash_export,
1732 .import = cc_hash_import,
1733 .setkey = cc_hash_setkey,
1734 .halg = {
1735 .digestsize = MD5_DIGEST_SIZE,
1736 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1737 },
1738 },
1739 .hash_mode = DRV_HASH_MD5,
1740 .hw_mode = DRV_HASH_HW_MD5,
1741 .inter_digestsize = MD5_DIGEST_SIZE,
1742 .min_hw_rev = CC_HW_REV_630,
1743		.std_body = CC_STD_NIST,
1744 },
1745 {
1746 .name = "sm3",
1747 .driver_name = "sm3-ccree",
1748 .blocksize = SM3_BLOCK_SIZE,
1749 .is_mac = false,
1750 .template_ahash = {
1751 .init = cc_hash_init,
1752 .update = cc_hash_update,
1753 .final = cc_hash_final,
1754 .finup = cc_hash_finup,
1755 .digest = cc_hash_digest,
1756 .export = cc_hash_export,
1757 .import = cc_hash_import,
1758 .setkey = cc_hash_setkey,
1759 .halg = {
1760 .digestsize = SM3_DIGEST_SIZE,
1761 .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
1762 },
1763 },
1764 .hash_mode = DRV_HASH_SM3,
1765 .hw_mode = DRV_HASH_HW_SM3,
1766 .inter_digestsize = SM3_DIGEST_SIZE,
1767 .min_hw_rev = CC_HW_REV_713,
1768 .std_body = CC_STD_OSCCA,
1769	},
1770 {
1771 .mac_name = "xcbc(aes)",
1772 .mac_driver_name = "xcbc-aes-ccree",
1773 .blocksize = AES_BLOCK_SIZE,
1774		.is_mac = true,
1775		.template_ahash = {
1776 .init = cc_hash_init,
1777 .update = cc_mac_update,
1778 .final = cc_mac_final,
1779 .finup = cc_mac_finup,
1780 .digest = cc_mac_digest,
1781 .setkey = cc_xcbc_setkey,
1782 .export = cc_hash_export,
1783 .import = cc_hash_import,
1784 .halg = {
1785 .digestsize = AES_BLOCK_SIZE,
1786 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1787 },
1788 },
1789 .hash_mode = DRV_HASH_NULL,
1790 .hw_mode = DRV_CIPHER_XCBC_MAC,
1791 .inter_digestsize = AES_BLOCK_SIZE,
1792 .min_hw_rev = CC_HW_REV_630,
1793		.std_body = CC_STD_NIST,
1794	},
1795 {
1796 .mac_name = "cmac(aes)",
1797 .mac_driver_name = "cmac-aes-ccree",
1798 .blocksize = AES_BLOCK_SIZE,
1799		.is_mac = true,
1800		.template_ahash = {
1801 .init = cc_hash_init,
1802 .update = cc_mac_update,
1803 .final = cc_mac_final,
1804 .finup = cc_mac_finup,
1805 .digest = cc_mac_digest,
1806 .setkey = cc_cmac_setkey,
1807 .export = cc_hash_export,
1808 .import = cc_hash_import,
1809 .halg = {
1810 .digestsize = AES_BLOCK_SIZE,
1811 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1812 },
1813 },
1814 .hash_mode = DRV_HASH_NULL,
1815 .hw_mode = DRV_CIPHER_CMAC,
1816 .inter_digestsize = AES_BLOCK_SIZE,
1817 .min_hw_rev = CC_HW_REV_630,
1818		.std_body = CC_STD_NIST,
1819	},
1820};
1821
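/*
 * Allocate a cc_hash_alg instance from a template. When @keyed is true the
 * MAC names from the template are used; otherwise the plain hash names are
 * used and setkey is cleared.
 */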
1822static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1823 struct device *dev, bool keyed)
1824{
1825 struct cc_hash_alg *t_crypto_alg;
1826 struct crypto_alg *alg;
1827 struct ahash_alg *halg;
1828
1829 t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
1830 if (!t_crypto_alg)
1831 return ERR_PTR(-ENOMEM);
1832
1833 t_crypto_alg->ahash_alg = template->template_ahash;
1834 halg = &t_crypto_alg->ahash_alg;
1835 alg = &halg->halg.base;
1836
1837 if (keyed) {
1838 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1839 template->mac_name);
1840 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1841 template->mac_driver_name);
1842 } else {
1843 halg->setkey = NULL;
1844 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1845 template->name);
1846 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1847 template->driver_name);
1848 }
1849 alg->cra_module = THIS_MODULE;
1850 alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1851 alg->cra_priority = CC_CRA_PRIO;
1852 alg->cra_blocksize = template->blocksize;
1853 alg->cra_alignmask = 0;
1854 alg->cra_exit = cc_cra_exit;
1855
1856 alg->cra_init = cc_cra_init;
1857 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
1858
1859 t_crypto_alg->hash_mode = template->hash_mode;
1860 t_crypto_alg->hw_mode = template->hw_mode;
1861 t_crypto_alg->inter_digestsize = template->inter_digestsize;
1862
1863 return t_crypto_alg;
1864}
1865
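/*
 * Load the hash constants into SRAM: the digest-length values first, then
 * the larval (initial) digests in a fixed order (MD5, SHA1, SHA224, SHA256,
 * optionally SM3, SHA384 and SHA512), matching the offsets computed in
 * cc_larval_digest_addr().
 */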
1866int cc_init_hash_sram(struct cc_drvdata *drvdata)
1867{
1868 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1869 cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
1870 unsigned int larval_seq_len = 0;
1871 struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1872 bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1873	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
1874	int rc = 0;
1875
1876 /* Copy-to-sram digest-len */
1877	cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
1878			 ARRAY_SIZE(cc_digest_len_init), larval_seq,
1879			 &larval_seq_len);
1880 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1881 if (rc)
1882 goto init_digest_const_err;
1883
1884	sram_buff_ofs += sizeof(cc_digest_len_init);
1885	larval_seq_len = 0;
1886
1887 if (large_sha_supported) {
1888 /* Copy-to-sram digest-len for sha384/512 */
1889		cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
1890				 ARRAY_SIZE(cc_digest_len_sha512_init),
1891				 larval_seq, &larval_seq_len);
1892 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1893 if (rc)
1894 goto init_digest_const_err;
1895
1896		sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
1897		larval_seq_len = 0;
1898 }
1899
1900 /* The initial digests offset */
1901 hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1902
1903 /* Copy-to-sram initial SHA* digests */
1904	cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
1905			 larval_seq, &larval_seq_len);
1906 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1907 if (rc)
1908 goto init_digest_const_err;
1909	sram_buff_ofs += sizeof(cc_md5_init);
1910	larval_seq_len = 0;
1911
1912	cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
1913			 ARRAY_SIZE(cc_sha1_init), larval_seq,
1914			 &larval_seq_len);
1915 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1916 if (rc)
1917 goto init_digest_const_err;
1918	sram_buff_ofs += sizeof(cc_sha1_init);
1919	larval_seq_len = 0;
1920
1921	cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
1922			 ARRAY_SIZE(cc_sha224_init), larval_seq,
1923			 &larval_seq_len);
1924 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1925 if (rc)
1926 goto init_digest_const_err;
1927	sram_buff_ofs += sizeof(cc_sha224_init);
1928	larval_seq_len = 0;
1929
1930	cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
1931			 ARRAY_SIZE(cc_sha256_init), larval_seq,
1932			 &larval_seq_len);
1933 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1934 if (rc)
1935 goto init_digest_const_err;
1936	sram_buff_ofs += sizeof(cc_sha256_init);
1937	larval_seq_len = 0;
1938
1939	if (sm3_supported) {
1940		cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
1941				 ARRAY_SIZE(cc_sm3_init), larval_seq,
1942				 &larval_seq_len);
1943 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1944 if (rc)
1945 goto init_digest_const_err;
1946		sram_buff_ofs += sizeof(cc_sm3_init);
1947 larval_seq_len = 0;
1948 }
1949
1950 if (large_sha_supported) {
1951 cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
1952 (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
1953 &larval_seq_len);
1954 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1955 if (rc)
1956 goto init_digest_const_err;
1957 sram_buff_ofs += sizeof(cc_sha384_init);
1958		larval_seq_len = 0;
1959
1960		cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
1961				 (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
1962				 &larval_seq_len);
1963 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1964 if (rc)
1965 goto init_digest_const_err;
1966 }
1967
1968init_digest_const_err:
1969 return rc;
1970}
1971
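/* Swap each pair of adjacent 32-bit words in a buffer of @size words */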
1972static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1973{
1974 int i;
1975 u32 tmp;
1976
1977 for (i = 0; i < size; i += 2) {
1978 tmp = buf[i];
1979 buf[i] = buf[i + 1];
1980 buf[i + 1] = tmp;
1981 }
1982}
1983
1984/*
1985 * Due to the way the HW works we need to swap every
1986 * double word in the SHA384 and SHA512 larval hashes
1987 */
1988void __init cc_hash_global_init(void)
1989{
1990	cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
1991	cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
1992}
1993
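/*
 * Allocate the driver hash handle and its SRAM area, load the hash constants
 * and register every ahash/MAC algorithm supported by this HW revision and
 * standard body.
 */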
1994int cc_hash_alloc(struct cc_drvdata *drvdata)
1995{
1996 struct cc_hash_handle *hash_handle;
1997 cc_sram_addr_t sram_buff;
1998 u32 sram_size_to_alloc;
1999 struct device *dev = drvdata_to_dev(drvdata);
2000 int rc = 0;
2001 int alg;
2002
2003 hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
2004 if (!hash_handle)
2005 return -ENOMEM;
2006
2007 INIT_LIST_HEAD(&hash_handle->hash_list);
2008 drvdata->hash_handle = hash_handle;
2009
2010	sram_size_to_alloc = sizeof(cc_digest_len_init) +
2011 sizeof(cc_md5_init) +
2012 sizeof(cc_sha1_init) +
2013 sizeof(cc_sha224_init) +
2014 sizeof(cc_sha256_init);
2015
2016 if (drvdata->hw_rev >= CC_HW_REV_713)
2017 sram_size_to_alloc += sizeof(cc_sm3_init);
2018
2019	if (drvdata->hw_rev >= CC_HW_REV_712)
2020		sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
2021			sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
2022
2023 sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
2024 if (sram_buff == NULL_SRAM_ADDR) {
2025 dev_err(dev, "SRAM pool exhausted\n");
2026 rc = -ENOMEM;
2027 goto fail;
2028 }
2029
2030 /* The initial digest-len offset */
2031 hash_handle->digest_len_sram_addr = sram_buff;
2032
2033	/* Must be set before the alg registration as it is used there */
2034 rc = cc_init_hash_sram(drvdata);
2035 if (rc) {
2036 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
2037 goto fail;
2038 }
2039
2040 /* ahash registration */
2041 for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2042 struct cc_hash_alg *t_alg;
2043 int hw_mode = driver_hash[alg].hw_mode;
2044
2045		/* Check that the HW revision and variants are suitable */
2046		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
2047		    !(drvdata->std_bodies & driver_hash[alg].std_body))
2048			continue;
2049
2050		if (driver_hash[alg].is_mac) {
2051 /* register hmac version */
2052 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
2053 if (IS_ERR(t_alg)) {
2054 rc = PTR_ERR(t_alg);
2055 dev_err(dev, "%s alg allocation failed\n",
2056 driver_hash[alg].driver_name);
2057 goto fail;
2058 }
2059 t_alg->drvdata = drvdata;
2060
2061			rc = crypto_register_ahash(&t_alg->ahash_alg);
2062 if (rc) {
2063 dev_err(dev, "%s alg registration failed\n",
2064 driver_hash[alg].driver_name);
2065 kfree(t_alg);
2066 goto fail;
2067 } else {
2068 list_add_tail(&t_alg->entry,
2069 &hash_handle->hash_list);
2070 }
2071		}
2072		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2073 hw_mode == DRV_CIPHER_CMAC)
2074 continue;
2075
2076 /* register hash version */
2077 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2078 if (IS_ERR(t_alg)) {
2079 rc = PTR_ERR(t_alg);
2080 dev_err(dev, "%s alg allocation failed\n",
2081 driver_hash[alg].driver_name);
2082 goto fail;
2083 }
2084 t_alg->drvdata = drvdata;
2085
2086 rc = crypto_register_ahash(&t_alg->ahash_alg);
2087 if (rc) {
2088 dev_err(dev, "%s alg registration failed\n",
2089 driver_hash[alg].driver_name);
2090 kfree(t_alg);
2091 goto fail;
2092 } else {
2093 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2094 }
2095 }
2096
2097 return 0;
2098
2099fail:
2100 kfree(drvdata->hash_handle);
2101 drvdata->hash_handle = NULL;
2102 return rc;
2103}
2104
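/* Unregister all previously registered ahash algorithms and free the handle */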
2105int cc_hash_free(struct cc_drvdata *drvdata)
2106{
2107 struct cc_hash_alg *t_hash_alg, *hash_n;
2108 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2109
2110 if (hash_handle) {
2111 list_for_each_entry_safe(t_hash_alg, hash_n,
2112 &hash_handle->hash_list, entry) {
2113 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2114 list_del(&t_hash_alg->entry);
2115 kfree(t_hash_alg);
2116 }
2117
2118 kfree(hash_handle);
2119 drvdata->hash_handle = NULL;
2120 }
2121 return 0;
2122}
2123
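/*
 * Build the descriptors that load the three derived XCBC-MAC keys (K1-K3)
 * and the current MAC state into the AES engine.
 */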
2124static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2125 unsigned int *seq_size)
2126{
2127 unsigned int idx = *seq_size;
2128 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2129 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2130 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2131
2132 /* Setup XCBC MAC K1 */
2133 hw_desc_init(&desc[idx]);
2134 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2135 XCBC_MAC_K1_OFFSET),
2136 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2137 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2138	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
2139	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2140 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2141 set_flow_mode(&desc[idx], S_DIN_to_AES);
2142 idx++;
2143
2144 /* Setup XCBC MAC K2 */
2145 hw_desc_init(&desc[idx]);
2146 set_din_type(&desc[idx], DMA_DLLI,
2147 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2148 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2149 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2150 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2151 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2152 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2153 set_flow_mode(&desc[idx], S_DIN_to_AES);
2154 idx++;
2155
2156 /* Setup XCBC MAC K3 */
2157 hw_desc_init(&desc[idx]);
2158 set_din_type(&desc[idx], DMA_DLLI,
2159 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2160 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2161 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2162 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2163 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2164 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2165 set_flow_mode(&desc[idx], S_DIN_to_AES);
2166 idx++;
2167
2168 /* Loading MAC state */
2169 hw_desc_init(&desc[idx]);
2170 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2171 CC_AES_BLOCK_SIZE, NS_BIT);
2172 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2173 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2174 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2175 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2176 set_flow_mode(&desc[idx], S_DIN_to_AES);
2177 idx++;
2178 *seq_size = idx;
2179}
2180
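/*
 * Build the descriptors that load the CMAC key and the current MAC state
 * into the AES engine.
 */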
2181static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2182 unsigned int *seq_size)
2183{
2184 unsigned int idx = *seq_size;
2185 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2186 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2187 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2188
2189 /* Setup CMAC Key */
2190 hw_desc_init(&desc[idx]);
2191 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2192 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2193 ctx->key_params.keylen), NS_BIT);
2194 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2195 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2196 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2197 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2198 set_flow_mode(&desc[idx], S_DIN_to_AES);
2199 idx++;
2200
2201 /* Load MAC state */
2202 hw_desc_init(&desc[idx]);
2203 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2204 CC_AES_BLOCK_SIZE, NS_BIT);
2205 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2206 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2207 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2208 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2209 set_flow_mode(&desc[idx], S_DIN_to_AES);
2210 idx++;
2211 *seq_size = idx;
2212}
2213
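/*
 * Build the data-in descriptors for the current request: a single DLLI
 * descriptor for contiguous data, or a BYPASS copy of the MLLI table into
 * SRAM followed by an MLLI descriptor for scattered data.
 */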
2214static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2215 struct cc_hash_ctx *ctx, unsigned int flow_mode,
2216 struct cc_hw_desc desc[], bool is_not_last_data,
2217 unsigned int *seq_size)
2218{
2219 unsigned int idx = *seq_size;
2220 struct device *dev = drvdata_to_dev(ctx->drvdata);
2221
2222 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2223 hw_desc_init(&desc[idx]);
2224 set_din_type(&desc[idx], DMA_DLLI,
2225 sg_dma_address(areq_ctx->curr_sg),
2226 areq_ctx->curr_sg->length, NS_BIT);
2227 set_flow_mode(&desc[idx], flow_mode);
2228 idx++;
2229 } else {
2230 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2231 dev_dbg(dev, " NULL mode\n");
2232 /* nothing to build */
2233 return;
2234 }
2235 /* bypass */
2236 hw_desc_init(&desc[idx]);
2237 set_din_type(&desc[idx], DMA_DLLI,
2238 areq_ctx->mlli_params.mlli_dma_addr,
2239 areq_ctx->mlli_params.mlli_len, NS_BIT);
2240 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2241 areq_ctx->mlli_params.mlli_len);
2242 set_flow_mode(&desc[idx], BYPASS);
2243 idx++;
2244 /* process */
2245 hw_desc_init(&desc[idx]);
2246 set_din_type(&desc[idx], DMA_MLLI,
2247 ctx->drvdata->mlli_sram_addr,
2248 areq_ctx->mlli_nents, NS_BIT);
2249 set_flow_mode(&desc[idx], flow_mode);
2250 idx++;
2251 }
2252 if (is_not_last_data)
2253 set_din_not_last_indication(&desc[(idx - 1)]);
2254 /* return updated desc sequence size */
2255 *seq_size = idx;
2256}
2257
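/* Return the in-kernel copy of the larval (initial) digest for @mode */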
2258static const void *cc_larval_digest(struct device *dev, u32 mode)
2259{
2260 switch (mode) {
2261 case DRV_HASH_MD5:
2262		return cc_md5_init;
2263	case DRV_HASH_SHA1:
2264		return cc_sha1_init;
2265	case DRV_HASH_SHA224:
2266		return cc_sha224_init;
2267	case DRV_HASH_SHA256:
2268		return cc_sha256_init;
2269	case DRV_HASH_SHA384:
2270		return cc_sha384_init;
2271	case DRV_HASH_SHA512:
2272		return cc_sha512_init;
2273	case DRV_HASH_SM3:
2274		return cc_sm3_init;
2275	default:
2276		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2277		return cc_md5_init;
2278	}
2279}
2280
2281/*!
2282 * Gets the address of the initial digest in SRAM
2283 * according to the given hash mode
2284 *
2285 * \param drvdata The driver private data
2286 * \param mode The hash mode: MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
2287 *
2288 * \return u32 The address of the initial digest in SRAM
2289 */
2290cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2291{
2292 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2293 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2294 struct device *dev = drvdata_to_dev(_drvdata);
2295	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
2296	cc_sram_addr_t addr;
2297
2298 switch (mode) {
2299 case DRV_HASH_NULL:
2300 break; /*Ignore*/
2301 case DRV_HASH_MD5:
2302 return (hash_handle->larval_digest_sram_addr);
2303 case DRV_HASH_SHA1:
2304 return (hash_handle->larval_digest_sram_addr +
2305			sizeof(cc_md5_init));
2306	case DRV_HASH_SHA224:
2307 return (hash_handle->larval_digest_sram_addr +
2308			sizeof(cc_md5_init) +
2309			sizeof(cc_sha1_init));
2310	case DRV_HASH_SHA256:
2311 return (hash_handle->larval_digest_sram_addr +
2312			sizeof(cc_md5_init) +
2313 sizeof(cc_sha1_init) +
2314 sizeof(cc_sha224_init));
2315 case DRV_HASH_SM3:
2316 return (hash_handle->larval_digest_sram_addr +
2317 sizeof(cc_md5_init) +
2318 sizeof(cc_sha1_init) +
2319 sizeof(cc_sha224_init) +
2320 sizeof(cc_sha256_init));
2321	case DRV_HASH_SHA384:
2322		addr = (hash_handle->larval_digest_sram_addr +
2323 sizeof(cc_md5_init) +
2324 sizeof(cc_sha1_init) +
2325 sizeof(cc_sha224_init) +
2326 sizeof(cc_sha256_init));
2327 if (sm3_supported)
2328 addr += sizeof(cc_sm3_init);
2329 return addr;
2330	case DRV_HASH_SHA512:
2331		addr = (hash_handle->larval_digest_sram_addr +
2332 sizeof(cc_md5_init) +
2333 sizeof(cc_sha1_init) +
2334 sizeof(cc_sha224_init) +
2335 sizeof(cc_sha256_init) +
2336 sizeof(cc_sha384_init));
2337 if (sm3_supported)
2338 addr += sizeof(cc_sm3_init);
2339 return addr;
2340	default:
2341 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2342 }
2343
2344	/* Invalid mode: return a valid, if incorrect, address to avoid a kernel crash */
2345 return hash_handle->larval_digest_sram_addr;
2346}
2347
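/*
 * Return the SRAM address of the initial digest-length constant for @mode
 * (the SHA384/SHA512 length constant follows the generic one).
 */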
2348cc_sram_addr_t
2349cc_digest_len_addr(void *drvdata, u32 mode)
2350{
2351 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2352 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2353 cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2354
2355 switch (mode) {
2356 case DRV_HASH_SHA1:
2357 case DRV_HASH_SHA224:
2358 case DRV_HASH_SHA256:
2359 case DRV_HASH_MD5:
2360 return digest_len_addr;
2361#if (CC_DEV_SHA_MAX > 256)
2362 case DRV_HASH_SHA384:
2363 case DRV_HASH_SHA512:
2364		return digest_len_addr + sizeof(cc_digest_len_init);
2365#endif
2366 default:
2367		return digest_len_addr; /* fall back to a valid address to avoid a kernel crash */
2368 }
2369}