/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2016 Broadcom
 */

#ifndef _CIPHER_H
#define _CIPHER_H

#include <linux/atomic.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/mailbox_client.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/arc4.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>

#include "spu.h"
#include "spum.h"
#include "spu2.h"

/* Driver supports up to MAX_SPUS SPU blocks */
#define MAX_SPUS 16

#define ARC4_STATE_SIZE 4

#define CCM_AES_IV_SIZE 16
#define CCM_ESP_IV_SIZE 8
#define RFC4543_ICV_SIZE 16

#define MAX_KEY_SIZE	ARC4_MAX_KEY_SIZE
#define MAX_IV_SIZE	AES_BLOCK_SIZE
#define MAX_DIGEST_SIZE	SHA3_512_DIGEST_SIZE
#define MAX_ASSOC_SIZE	512

/* size of salt value for AES-GCM-ESP and AES-CCM-ESP */
#define GCM_ESP_SALT_SIZE	4
#define CCM_ESP_SALT_SIZE	3
#define MAX_SALT_SIZE		GCM_ESP_SALT_SIZE
#define GCM_ESP_SALT_OFFSET	0
#define CCM_ESP_SALT_OFFSET	1
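
/*
 * Illustrative sketch, not part of the original header: for AES-GCM-ESP
 * (RFC 4106) the 4-byte salt from the end of the key sits at the start of
 * the IV buffer, followed by the 8-byte explicit IV from the packet. For
 * AES-CCM-ESP (RFC 4309), byte 0 of the counter block holds the CCM flags,
 * so the 3-byte salt lands at offset 1. The helper below is hypothetical
 * and assumes <linux/string.h> for memcpy().
 */
static inline void spu_esp_apply_salt(u8 *iv_buf, const u8 *salt,
				      unsigned int salt_len,
				      unsigned int salt_offset)
{
	/* e.g. GCM: salt_len = GCM_ESP_SALT_SIZE, salt_offset = GCM_ESP_SALT_OFFSET */
	memcpy(iv_buf + salt_offset, salt, salt_len);
}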

#define GCM_ESP_DIGESTSIZE 16

#define MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE

/*
 * Maximum number of bytes from a non-final hash request that can be deferred
 * until more data is available. With the new crypto API framework, this
 * can be no more than one block of data.
 */
#define HASH_CARRY_MAX MAX_HASH_BLOCK_SIZE
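
/*
 * Sketch of the carry computation described above (hypothetical helper, not
 * in the driver): a non-final request is sent as whole blocks, and whatever
 * is left over is held in hash_carry until more data arrives.
 */
static inline unsigned int spu_hash_carry_len(unsigned int total_len,
					      unsigned int block_size)
{
	/* Trailing bytes that do not fill a complete hash block */
	return total_len % block_size;
}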

/* Force at least 4-byte alignment of all SPU message fields */
#define SPU_MSG_ALIGN 4

/* Number of times to resend a mailbox message if the mb queue is full */
#define SPU_MB_RETRY_MAX 1000

/* op_counts[] indexes */
enum op_type {
	SPU_OP_CIPHER,
	SPU_OP_HASH,
	SPU_OP_HMAC,
	SPU_OP_AEAD,
	SPU_OP_NUM
};

enum spu_spu_type {
	SPU_TYPE_SPUM,
	SPU_TYPE_SPU2,
};

/*
 * SPUM_NS2 and SPUM_NSP are the SPU-M block on Northstar 2 and Northstar Plus,
 * respectively.
 */
enum spu_spu_subtype {
	SPU_SUBTYPE_SPUM_NS2,
	SPU_SUBTYPE_SPUM_NSP,
	SPU_SUBTYPE_SPU2_V1,
	SPU_SUBTYPE_SPU2_V2
};

struct spu_type_subtype {
	enum spu_spu_type type;
	enum spu_spu_subtype subtype;
};

struct cipher_op {
	enum spu_cipher_alg alg;
	enum spu_cipher_mode mode;
};

struct auth_op {
	enum hash_alg alg;
	enum hash_mode mode;
};

struct iproc_alg_s {
	u32 type;
	union {
		struct skcipher_alg skcipher;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	struct cipher_op cipher_info;
	struct auth_op auth_info;
	bool auth_first;
	bool registered;
};

/*
 * Buffers for a SPU request/reply message pair. All part of one structure to
 * allow a single alloc per request.
 */
struct spu_msg_buf {
	/* Request message fragments */

	/*
	 * SPU request message header. For SPU-M, holds MH, EMH, SCTX, BDESC,
	 * and BD header. For SPU2, holds FMD, OMD.
	 */
	u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];

	/* IV or counter. Sized to include the salt. Also used for the XTS tweak. */
	u8 iv_ctr[ALIGN(2 * AES_BLOCK_SIZE, SPU_MSG_ALIGN)];

	/* Hash digest. Used for both request and response. */
	u8 digest[ALIGN(MAX_DIGEST_SIZE, SPU_MSG_ALIGN)];

	/* SPU request message padding */
	u8 spu_req_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];

	/* SPU-M request message STATUS field */
	u8 tx_stat[ALIGN(SPU_TX_STATUS_LEN, SPU_MSG_ALIGN)];

	/* Response message fragments */

	/* SPU response message header */
	u8 spu_resp_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];

	/* SPU response message STATUS field padding */
	u8 rx_stat_pad[ALIGN(SPU_STAT_PAD_MAX, SPU_MSG_ALIGN)];

	/* SPU response message STATUS field */
	u8 rx_stat[ALIGN(SPU_RX_STATUS_LEN, SPU_MSG_ALIGN)];

	union {
		/* Buffers only used for skcipher */
		struct {
			/*
			 * Field used for either SUPDT when RC4 is used
			 * -OR- the tweak value when XTS/AES is used
			 */
			u8 supdt_tweak[ALIGN(SPU_SUPDT_LEN, SPU_MSG_ALIGN)];
		} c;

		/* Buffers only used for aead */
		struct {
			/* SPU response pad for GCM data */
			u8 gcmpad[ALIGN(AES_BLOCK_SIZE, SPU_MSG_ALIGN)];

			/* SPU request msg padding for GCM AAD */
			u8 req_aad_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];

			/* SPU response data to be discarded */
			u8 resp_aad[ALIGN(MAX_ASSOC_SIZE + MAX_IV_SIZE,
					  SPU_MSG_ALIGN)];
		} a;
	};
};
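
/*
 * Sketch of how one fragment from spu_msg_buf might be linked into a mailbox
 * scatterlist (an assumed usage; the real sg assembly lives in the driver's
 * cipher.c). Assumes <linux/scatterlist.h> for sg_set_buf().
 */
static inline void spu_msg_add_hdr_frag(struct scatterlist *sg,
					struct spu_msg_buf *mb, u16 hdr_len)
{
	/* Each field above is padded to SPU_MSG_ALIGN, so fragments stay aligned */
	sg_set_buf(sg, mb->bcm_spu_req_hdr, hdr_len);
}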

struct iproc_ctx_s {
	u8 enckey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
	unsigned int enckeylen;

	u8 authkey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
	unsigned int authkeylen;

	u8 salt[MAX_SALT_SIZE];
	unsigned int salt_len;
	unsigned int salt_offset;
	u8 iv[MAX_IV_SIZE];

	unsigned int digestsize;

	struct iproc_alg_s *alg;
	bool is_esp;

	struct cipher_op cipher;
	enum spu_cipher_type cipher_type;

	struct auth_op auth;
	bool auth_first;

	/*
	 * The maximum length in bytes of the payload in a SPU message for this
	 * context. For SPU-M, the payload is the combination of AAD and data.
	 * For SPU2, the payload is just data. A value of SPU_MAX_PAYLOAD_INF
	 * indicates that there is no limit to the length of the SPU message
	 * payload.
	 */
	unsigned int max_payload;

	struct crypto_aead *fallback_cipher;

	/* auth_type is determined during processing of request */

	u8 ipad[MAX_HASH_BLOCK_SIZE];
	u8 opad[MAX_HASH_BLOCK_SIZE];

	/*
	 * Buffer to hold the SPU message header template. The template is
	 * created at setkey time for skcipher requests, since most of the
	 * fields in the header are known at that time. At request time, just
	 * fill in a few missing pieces related to the length of data in the
	 * request, IVs, etc.
	 */
	u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];

	/* Length of SPU request header */
	u16 spu_req_hdr_len;

	/* Expected length of SPU response header */
	u16 spu_resp_hdr_len;

	/*
	 * shash descriptor - needed to perform incremental hashing in
	 * software, when the hw doesn't support it.
	 */
	struct shash_desc *shash;

	bool is_rfc4543;	/* RFC 4543 style of GMAC */
};

/* State from iproc_reqctx_s necessary for hash state export/import */
struct spu_hash_export_s {
	unsigned int total_todo;
	unsigned int total_sent;
	u8 hash_carry[HASH_CARRY_MAX];
	unsigned int hash_carry_len;
	u8 incr_hash[MAX_DIGEST_SIZE];
	bool is_sw_hmac;
};
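
/*
 * Sketch of export (an assumption about usage, not the driver's code): an
 * ahash with .statesize = sizeof(struct spu_hash_export_s) could flatten the
 * request state with a single copy. Assumes <linux/string.h> for memcpy().
 */
static inline void spu_hash_export_sketch(void *out,
					  const struct spu_hash_export_s *state)
{
	/* Caller's buffer is .statesize bytes, per the crypto API contract */
	memcpy(out, state, sizeof(*state));
}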

struct iproc_reqctx_s {
	/* general context */
	struct crypto_async_request *parent;

	/* only valid after enqueue() */
	struct iproc_ctx_s *ctx;

	u8 chan_idx;	/* Mailbox channel to be used to submit this request */

	/* total todo, rx'd, and sent for this request */
	unsigned int total_todo;
	unsigned int total_received;	/* only valid for skcipher */
	unsigned int total_sent;

	/*
	 * Number of bytes sent to hw from the src sg in this request. This can
	 * differ from total_sent for incremental hashing. total_sent includes
	 * previous init() and update() data. src_sent does not.
	 */
	unsigned int src_sent;

	/*
	 * For AEAD requests, start of associated data. This will typically
	 * point to the beginning of the src scatterlist from the request,
	 * since assoc data is at the beginning of the src scatterlist rather
	 * than in its own sg.
	 */
	struct scatterlist *assoc;

	/*
	 * Scatterlist entry and offset to start of data for the next chunk.
	 * The crypto API src scatterlist for AEAD starts with AAD, if present.
	 * For the first chunk, src_sg is the sg entry at the beginning of
	 * input data (after AAD). src_skip begins at the offset in that sg
	 * entry where data begins.
	 */
	struct scatterlist *src_sg;
	int src_nents;	/* Number of src entries with data */
	u32 src_skip;	/* bytes of current sg entry already used */

	/*
	 * Same for destination. For AEAD, if there is AAD, output data must
	 * be written at the offset following the AAD.
	 */
	struct scatterlist *dst_sg;
	int dst_nents;	/* Number of dst entries with data */
	u32 dst_skip;	/* bytes of current sg entry already written */

	/* Mailbox message used to send this request to the PDC driver */
	struct brcm_message mb_mssg;

	bool bd_suppress;	/* suppress BD field in SPU response? */

	/* cipher context */
	bool is_encrypt;

	/*
	 * CBC mode: IV. CTR mode: counter. Else empty. Used as a DMA
	 * buffer for AEAD requests, so allocate as DMAable memory. If the IV
	 * is concatenated with a salt, includes the salt.
	 */
	u8 *iv_ctr;
	/* Length of IV or counter, in bytes */
	unsigned int iv_ctr_len;

	/*
	 * Hash requests can be of any size, whether initial, update, or final.
	 * A non-final request must be submitted to the SPU as an integral
	 * number of blocks. This may leave data at the end of the request
	 * that is not a full block. Since the request is non-final, it cannot
	 * be padded. So, we write the remainder to this hash_carry buffer and
	 * hold it until the next request arrives. The carry data is then
	 * submitted at the beginning of the data in the next SPU msg.
	 * hash_carry_len is the number of bytes currently in hash_carry. These
	 * fields are only used for ahash requests.
	 */
	u8 hash_carry[HASH_CARRY_MAX];
	unsigned int hash_carry_len;
	unsigned int is_final;	/* is this the final for the hash op? */

	/*
	 * Digest from incremental hash is saved here to include in the next
	 * hash operation. It cannot be stored in req->result for truncated
	 * hashes, since result may be sized for the final digest. It cannot
	 * be saved in msg_buf because that gets deleted between incremental
	 * hash ops and is not saved as part of export().
	 */
	u8 incr_hash[MAX_DIGEST_SIZE];

	/* hmac context */
	bool is_sw_hmac;

	/* aead context */
	struct crypto_tfm *old_tfm;
	crypto_completion_t old_complete;
	void *old_data;

	gfp_t gfp;

	/* Buffers used to build SPU request and response messages */
	struct spu_msg_buf msg_buf;
};
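
/*
 * Sketch of advancing the chunking cursor after 'sent' bytes go to the hw
 * (hypothetical helper; the real chunking logic lives in cipher.c). Assumes
 * <linux/scatterlist.h> for sg_next().
 */
static inline void spu_advance_src(struct iproc_reqctx_s *rctx,
				   unsigned int sent)
{
	rctx->src_sent += sent;
	rctx->src_skip += sent;

	/* Skip past any sg entries that are now fully consumed */
	while (rctx->src_sg && rctx->src_skip >= rctx->src_sg->length) {
		rctx->src_skip -= rctx->src_sg->length;
		rctx->src_sg = sg_next(rctx->src_sg);
	}
}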

/*
 * Structure encapsulating a set of function pointers specific to the type of
 * SPU hardware running. These functions handle creation and parsing of
 * SPU request messages and SPU response messages. Includes hardware-specific
 * values read from the device tree.
 */
struct spu_hw {
	void (*spu_dump_msg_hdr)(u8 *buf, unsigned int buf_len);
	u32 (*spu_ctx_max_payload)(enum spu_cipher_alg cipher_alg,
				   enum spu_cipher_mode cipher_mode,
				   unsigned int blocksize);
	u32 (*spu_payload_length)(u8 *spu_hdr);
	u16 (*spu_response_hdr_len)(u16 auth_key_len, u16 enc_key_len,
				    bool is_hash);
	u16 (*spu_hash_pad_len)(enum hash_alg hash_alg,
				enum hash_mode hash_mode, u32 chunksize,
				u16 hash_block_size);
	u32 (*spu_gcm_ccm_pad_len)(enum spu_cipher_mode cipher_mode,
				   unsigned int data_size);
	u32 (*spu_assoc_resp_len)(enum spu_cipher_mode cipher_mode,
				  unsigned int assoc_len,
				  unsigned int iv_len, bool is_encrypt);
	u8 (*spu_aead_ivlen)(enum spu_cipher_mode cipher_mode,
			     u16 iv_len);
	enum hash_type (*spu_hash_type)(u32 src_sent);
	u32 (*spu_digest_size)(u32 digest_size, enum hash_alg alg,
			       enum hash_type);
	u32 (*spu_create_request)(u8 *spu_hdr,
				  struct spu_request_opts *req_opts,
				  struct spu_cipher_parms *cipher_parms,
				  struct spu_hash_parms *hash_parms,
				  struct spu_aead_parms *aead_parms,
				  unsigned int data_size);
	u16 (*spu_cipher_req_init)(u8 *spu_hdr,
				   struct spu_cipher_parms *cipher_parms);
	void (*spu_cipher_req_finish)(u8 *spu_hdr,
				      u16 spu_req_hdr_len,
				      unsigned int is_inbound,
				      struct spu_cipher_parms *cipher_parms,
				      unsigned int data_size);
	void (*spu_request_pad)(u8 *pad_start, u32 gcm_padding,
				u32 hash_pad_len, enum hash_alg auth_alg,
				enum hash_mode auth_mode,
				unsigned int total_sent, u32 status_padding);
	u8 (*spu_xts_tweak_in_payload)(void);
	u8 (*spu_tx_status_len)(void);
	u8 (*spu_rx_status_len)(void);
	int (*spu_status_process)(u8 *statp);
	void (*spu_ccm_update_iv)(unsigned int digestsize,
				  struct spu_cipher_parms *cipher_parms,
				  unsigned int assoclen, unsigned int chunksize,
				  bool is_encrypt, bool is_esp);
	u32 (*spu_wordalign_padlen)(u32 data_size);

	/* The base virtual address of the SPU hw registers */
	void __iomem *reg_vbase[MAX_SPUS];

	/* Version of the SPU hardware */
	enum spu_spu_type spu_type;

	/* Sub-version of the SPU hardware */
	enum spu_spu_subtype spu_subtype;

	/* The number of SPUs on this platform */
	u32 num_spu;

	/* The number of SPU channels on this platform */
	u32 num_chan;
};
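
/*
 * Sketch of dispatch through the ops table (assumed usage): callers never
 * test spu_type directly when formatting messages; they go through the
 * function pointers, which point at SPU-M or SPU2 implementations.
 */
static inline u8 spu_tx_status_bytes(const struct spu_hw *spu)
{
	/* SPU-M requests carry a STATUS word; on SPU2 this is typically 0 */
	return spu->spu_tx_status_len();
}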
421
Olivier Deprez0e641232021-09-23 10:07:05 +0200422struct bcm_device_private {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000423 struct platform_device *pdev;
424
425 struct spu_hw spu;
426
427 atomic_t session_count; /* number of streams active */
428 atomic_t stream_count; /* monotonic counter for streamID's */
429
430 /* Length of BCM header. Set to 0 when hw does not expect BCM HEADER. */
431 u8 bcm_hdr_len;
432
433 /* The index of the channel to use for the next crypto request */
434 atomic_t next_chan;
435
436 struct dentry *debugfs_dir;
437 struct dentry *debugfs_stats;
438
439 /* Number of request bytes processed and result bytes returned */
440 atomic64_t bytes_in;
441 atomic64_t bytes_out;
442
443 /* Number of operations of each type */
444 atomic_t op_counts[SPU_OP_NUM];
445
446 atomic_t cipher_cnt[CIPHER_ALG_LAST][CIPHER_MODE_LAST];
447 atomic_t hash_cnt[HASH_ALG_LAST];
448 atomic_t hmac_cnt[HASH_ALG_LAST];
449 atomic_t aead_cnt[AEAD_TYPE_LAST];
450
451 /* Number of calls to setkey() for each operation type */
452 atomic_t setkey_cnt[SPU_OP_NUM];
453
454 /* Number of times request was resubmitted because mb was full */
455 atomic_t mb_no_spc;
456
457 /* Number of mailbox send failures */
458 atomic_t mb_send_fail;
459
460 /* Number of ICV check failures for AEAD messages */
461 atomic_t bad_icv;
462
463 struct mbox_client mcl;
464
465 /* Array of mailbox channel pointers, one for each channel */
466 struct mbox_chan **mbox;
467};
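
/*
 * Sketch of round-robin channel selection via next_chan (an assumption about
 * usage; the real selection logic lives in cipher.c).
 */
static inline u8 spu_next_chan_sketch(struct bcm_device_private *priv)
{
	/* Spread successive requests across the available mailbox channels */
	return (u8)(atomic_inc_return(&priv->next_chan) % priv->spu.num_chan);
}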

extern struct bcm_device_private iproc_priv;

#endif