// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 */

#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

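/*
 * Requests shorter than this many bytes are handed to the software
 * fallback cipher instead of the HW engine (see omap_aes_crypt()).
 * The threshold is tunable at runtime via the "fallback" sysfs attribute.
 */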
static int aes_fallback_sz = 200;

#ifdef DEBUG
#define omap_aes_read(dd, offset)				\
({								\
	int _read_ret;						\
	_read_ret = __raw_readl(dd->io_base + offset);		\
	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
		 offset, _read_ret);				\
	_read_ret;						\
})
#else
inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
#endif

#ifdef DEBUG
#define omap_aes_write(dd, offset, value)				\
	do {								\
		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
			 offset, value);				\
		__raw_writel(value, dd->io_base + offset);		\
	} while (0)
#else
inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
			   u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
#endif

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
				       u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

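/*
 * Power the accelerator up via runtime PM before a request is
 * programmed into the hardware.
 */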
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	int err;

	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	err = pm_runtime_resume_and_get(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	return 0;
}

void omap_aes_clear_copy_flags(struct omap_aes_dev *dd)
{
	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT);
	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT);
	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT);
}

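/*
 * Program the hardware for the current request: load the key, the IV
 * (for CBC/CTR/GCM) and the mode bits of the control register.
 */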
int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	struct omap_aes_reqctx *rctx;
	unsigned int key32;
	int i, err;
	u32 val;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* Reset the key, as previous HASH keys should not get affected */
	if (dd->flags & FLAGS_GCM)
		for (i = 0; i < 0x40; i = i + 4)
			omap_aes_write(dd, i, 0x0);

	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			       (__force u32)cpu_to_le32(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), (void *)dd->req->iv, 4);

	if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
		rctx = aead_request_ctx(dd->aead_req);
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4);
	}

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;

	if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;

	if (dd->flags & FLAGS_GCM)
		val |= AES_REG_CTRL_GCM;

	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);

	return 0;
}

static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}

static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
	if (dd->flags & FLAGS_GCM)
		omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);

	omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}

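/*
 * Pick a device for the request: take the first entry of dev_list and
 * rotate it to the tail so that multiple AES cores are used round-robin.
 */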
struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx)
{
	struct omap_aes_dev *dd;

	spin_lock_bh(&list_lock);
	dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
	list_move_tail(&dd->list, &dev_list);
	rctx->dd = dd;
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dma_lch_in)) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		return PTR_ERR(dd->dma_lch_in);
	}

	dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_lch_out)) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		err = PTR_ERR(dd->dma_lch_out);
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);

	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	if (dd->pio_only)
		return;

	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
}

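/*
 * Hand the request to the hardware. In PIO mode only the DATA_IN
 * interrupt is armed and omap_aes_irq() moves the data; otherwise the
 * "rx"/"tx" DMA channels are configured and the transfer is triggered.
 */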
static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
			      struct scatterlist *in_sg,
			      struct scatterlist *out_sg,
			      int in_sg_len, int out_sg_len)
{
	struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc;
	struct dma_slave_config cfg;
	int ret;

	if (dd->pio_only) {
		scatterwalk_start(&dd->in_walk, dd->in_sg);
		if (out_sg_len)
			scatterwalk_start(&dd->out_walk, dd->out_sg);

		/*
		 * Enable DATAIN interrupt and let it take
		 * care of the rest.
		 */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
		return 0;
	}

	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;
	tx_in->callback = NULL;

	/* OUT */
	if (out_sg_len) {
		ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
		if (ret) {
			dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
				ret);
			return ret;
		}

		tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
						 out_sg_len,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx_out) {
			dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
			return -EINVAL;
		}

		cb_desc = tx_out;
	} else {
		cb_desc = tx_in;
	}

	if (dd->flags & FLAGS_GCM)
		cb_desc->callback = omap_aes_gcm_dma_out_callback;
	else
		cb_desc->callback = omap_aes_dma_out_callback;
	cb_desc->callback_param = dd;

	dmaengine_submit(tx_in);
	if (tx_out)
		dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	if (out_sg_len)
		dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, dd->total);

	return 0;
}

int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	int err;

	pr_debug("total: %zu\n", dd->total);

	if (!dd->pio_only) {
		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		if (dd->out_sg_len) {
			err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
					 DMA_FROM_DEVICE);
			if (!err) {
				dev_err(dd->dev, "dma_map_sg() error\n");
				return -EINVAL;
			}
		}
	}

	err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
				 dd->out_sg_len);
	if (err && !dd->pio_only) {
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		if (dd->out_sg_len)
			dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
				     DMA_FROM_DEVICE);
	}

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	crypto_finalize_skcipher_request(dd->engine, req, err);

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);
}

int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	pr_debug("total: %zu\n", dd->total);

	omap_aes_dma_stop(dd);

	return 0;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,
				 struct skcipher_request *req)
{
	if (req)
		return crypto_transfer_skcipher_request_to_engine(dd->engine, req);

	return 0;
}

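/*
 * crypto_engine "prepare" callback: align/copy the scatterlists as
 * needed, record the request state in the device structure and program
 * the key, IV and control register.
 */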
static int omap_aes_prepare_req(struct crypto_engine *engine,
				void *areq)
{
	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
			crypto_skcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct omap_aes_dev *dd = rctx->dd;
	int ret;
	u16 flags;

	if (!dd)
		return -ENODEV;

	/* assign new request to device */
	dd->req = req;
	dd->total = req->cryptlen;
	dd->total_save = req->cryptlen;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	flags = OMAP_CRYPTO_COPY_DATA;
	if (req->src == req->dst)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
				   dd->in_sgl, flags,
				   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE,
				   &dd->out_sgl, 0,
				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
	if (dd->in_sg_len < 0)
		return dd->in_sg_len;

	dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
	if (dd->out_sg_len < 0)
		return dd->out_sg_len;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	rctx->dd = dd;

	return omap_aes_write_ctrl(dd);
}

static int omap_aes_crypt_req(struct crypto_engine *engine,
			      void *areq)
{
	struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
	struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct omap_aes_dev *dd = rctx->dd;

	if (!dd)
		return -ENODEV;

	return omap_aes_crypt_dma_start(dd);
}

static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
}

static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	pr_debug("enter done_task\n");

	if (!dd->pio_only) {
		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
				       DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
		omap_aes_crypt_dma_stop(dd);
	}

	omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	/* Update IV output */
	if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
		omap_aes_copy_ivout(dd, dd->req->iv);

	omap_aes_finish_req(dd, 0);

	pr_debug("exit\n");
}

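/*
 * Common entry point for the ECB/CBC/CTR requests below: short requests
 * (cryptlen < aes_fallback_sz) go to the software fallback, everything
 * else is queued to the crypto engine.
 */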
static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
			crypto_skcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct omap_aes_dev *dd;
	int ret;

	if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
		return -EINVAL;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
		 !!(mode & FLAGS_ENCRYPT),
		 !!(mode & FLAGS_CBC));

	if (req->cryptlen < aes_fallback_sz) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		if (mode & FLAGS_ENCRYPT)
			ret = crypto_skcipher_encrypt(&rctx->fallback_req);
		else
			ret = crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}
	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						 CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		return 0;

	return 0;
}

static int omap_aes_ecb_encrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct skcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}

static int omap_aes_prepare_req(struct crypto_engine *engine,
				void *req);
static int omap_aes_crypt_req(struct crypto_engine *engine,
			      void *req);

static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	ctx->fallback = blk;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) +
					 crypto_skcipher_reqsize(blk));

	ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = omap_aes_crypt_req;

	return 0;
}

static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->fallback)
		crypto_free_skcipher(ctx->fallback);

	ctx->fallback = NULL;
}

/* ********************** ALGS ************************************ */

static struct skcipher_alg algs_ecb_cbc[] = {
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-omap",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct omap_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= omap_aes_setkey,
	.encrypt		= omap_aes_ecb_encrypt,
	.decrypt		= omap_aes_ecb_decrypt,
	.init			= omap_aes_init_tfm,
	.exit			= omap_aes_exit_tfm,
},
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-omap",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct omap_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= omap_aes_setkey,
	.encrypt		= omap_aes_cbc_encrypt,
	.decrypt		= omap_aes_cbc_decrypt,
	.init			= omap_aes_init_tfm,
	.exit			= omap_aes_exit_tfm,
}
};

static struct skcipher_alg algs_ctr[] = {
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-omap",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct omap_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= omap_aes_setkey,
	.encrypt		= omap_aes_ctr_encrypt,
	.decrypt		= omap_aes_ctr_decrypt,
	.init			= omap_aes_init_tfm,
	.exit			= omap_aes_exit_tfm,
}
};

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};

static struct aead_alg algs_aead_gcm[] = {
{
	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-omap",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct omap_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
	.init		= omap_aes_gcm_cra_init,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= omap_aes_gcm_setkey,
	.setauthsize	= omap_aes_gcm_setauthsize,
	.encrypt	= omap_aes_gcm_encrypt,
	.decrypt	= omap_aes_gcm_decrypt,
},
{
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aes-omap",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct omap_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
	.init		= omap_aes_gcm_cra_init,
	.maxauthsize	= AES_BLOCK_SIZE,
	.ivsize		= GCM_RFC4106_IV_SIZE,
	.setkey		= omap_aes_4106gcm_setkey,
	.setauthsize	= omap_aes_4106gcm_setauthsize,
	.encrypt	= omap_aes_4106gcm_encrypt,
	.decrypt	= omap_aes_4106gcm_decrypt,
},
};

static struct omap_aes_aead_algs omap_aes_aead_info = {
	.algs_list	= algs_aead_gcm,
	.size		= ARRAY_SIZE(algs_aead_gcm),
};

static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

#ifdef CONFIG_OF
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.aead_algs_info	= &omap_aes_aead_info,
	.trigger	= omap_aes_dma_trigger_omap4,
	.key_ofs	= 0x3c,
	.iv_ofs		= 0x40,
	.ctrl_ofs	= 0x50,
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.irq_status_ofs = 0x8c,
	.irq_enable_ofs = 0x90,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

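/*
 * PIO mode interrupt handler: feed one AES block into the DATA
 * registers on DATA_IN, read the result back on DATA_OUT, and
 * ping-pong between the two interrupts until dd->total is consumed.
 */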
static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
	struct omap_aes_dev *dd = dev_id;
	u32 status, i;
	u32 *src, *dst;

	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
	if (status & AES_REG_IRQ_DATA_IN) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->in_sg);

		BUG_ON(_calc_walked(in) > dd->in_sg->length);

		src = sg_virt(dd->in_sg) + _calc_walked(in);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

			scatterwalk_advance(&dd->in_walk, 4);
			if (dd->in_sg->length == _calc_walked(in)) {
				dd->in_sg = sg_next(dd->in_sg);
				if (dd->in_sg) {
					scatterwalk_start(&dd->in_walk,
							  dd->in_sg);
					src = sg_virt(dd->in_sg) +
					      _calc_walked(in);
				}
			} else {
				src++;
			}
		}

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_IN;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		/* Enable DATA_OUT interrupt */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);

	} else if (status & AES_REG_IRQ_DATA_OUT) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->out_sg);

		BUG_ON(_calc_walked(out) > dd->out_sg->length);

		dst = sg_virt(dd->out_sg) + _calc_walked(out);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
			scatterwalk_advance(&dd->out_walk, 4);
			if (dd->out_sg->length == _calc_walked(out)) {
				dd->out_sg = sg_next(dd->out_sg);
				if (dd->out_sg) {
					scatterwalk_start(&dd->out_walk,
							  dd->out_sg);
					dst = sg_virt(dd->out_sg) +
					      _calc_walked(out);
				}
			} else {
				dst++;
			}
		}

		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_OUT;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		if (!dd->total)
			/* All bytes read! */
			tasklet_schedule(&dd->done_task);
		else
			/* Enable DATA_IN interrupt for next block */
			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
	}

	return IRQ_HANDLED;
}

static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
			       struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	int err = 0;

	dd->pdata = of_device_get_match_data(dev);
	if (!dd->pdata) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
			       struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
				 struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

err:
	return err;
}

static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", aes_fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/* HW accelerator only works with buffers larger than 9 bytes */
	if (value < 9) {
		dev_err(dev, "minimum fallback size 9\n");
		return -EINVAL;
	}

	aes_fallback_sz = value;

	return size;
}

static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct omap_aes_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t size)
{
	struct omap_aes_dev *dd;
	ssize_t status;
	long value;
	unsigned long flags;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	if (value < 1)
		return -EINVAL;

	/*
	 * Changing the queue size on the fly is safe. If the new size is
	 * smaller than the current size, no new entries are accepted until
	 * the queue has shrunk enough.
	 */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		spin_lock_irqsave(&dd->lock, flags);
		dd->engine->queue.max_qlen = value;
		dd->aead_queue.base.max_qlen = value;
		spin_unlock_irqrestore(&dd->lock, flags);
	}
	spin_unlock_bh(&list_lock);

	return size;
}

static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_aes_attrs[] = {
	&dev_attr_queue_len.attr,
	&dev_attr_fallback.attr,
	NULL,
};

static struct attribute_group omap_aes_attr_group = {
	.attrs = omap_aes_attrs,
};

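/*
 * Probe: map the registers, enable runtime PM, set up DMA (or fall back
 * to PIO + IRQ), start a crypto engine and register the skcipher/AEAD
 * algorithms described by the matched pdata.
 */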
static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct skcipher_alg *algp;
	struct aead_alg *aalg;
	struct resource res;
	int err = -ENOMEM, i, j, irq = -1;
	u32 reg;

	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	pm_runtime_enable(dev);
	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		goto err_pm_disable;
	}

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err == -EPROBE_DEFER) {
		goto err_irq;
	} else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
		dd->pio_only = 1;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			err = irq;
			goto err_irq;
		}

		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
				       dev_name(dev), dd);
		if (err) {
			dev_err(dev, "Unable to grab omap-aes IRQ\n");
			goto err_irq;
		}
	}

	spin_lock_init(&dd->lock);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_bh(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock_bh(&list_lock);

	/* Initialize crypto engine */
	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;
		goto err_engine;
	}

	err = crypto_engine_start(dd->engine);
	if (err)
		goto err_engine;

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		if (!dd->pdata->algs_info[i].registered) {
			for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
				algp = &dd->pdata->algs_info[i].algs_list[j];

				pr_debug("reg alg: %s\n", algp->base.cra_name);

				err = crypto_register_skcipher(algp);
				if (err)
					goto err_algs;

				dd->pdata->algs_info[i].registered++;
			}
		}
	}

	if (dd->pdata->aead_algs_info &&
	    !dd->pdata->aead_algs_info->registered) {
		for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
			aalg = &dd->pdata->aead_algs_info->algs_list[i];

			pr_debug("reg alg: %s\n", aalg->base.cra_name);

			err = crypto_register_aead(aalg);
			if (err)
				goto err_aead_algs;

			dd->pdata->aead_algs_info->registered++;
		}
	}

	err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
	if (err) {
		dev_err(dev, "could not create sysfs device attrs\n");
		goto err_aead_algs;
	}

	return 0;
err_aead_algs:
	for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
		aalg = &dd->pdata->aead_algs_info->algs_list[i];
		crypto_unregister_aead(aalg);
	}
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_skcipher(
					&dd->pdata->algs_info[i].algs_list[j]);

err_engine:
	if (dd->engine)
		crypto_engine_exit(dd->engine);

	omap_aes_dma_cleanup(dd);
err_irq:
	tasklet_kill(&dd->done_task);
err_pm_disable:
	pm_runtime_disable(dev);
err_res:
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	struct aead_alg *aalg;
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock_bh(&list_lock);
	list_del(&dd->list);
	spin_unlock_bh(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
			crypto_unregister_skcipher(
					&dd->pdata->algs_info[i].algs_list[j]);
			dd->pdata->algs_info[i].registered--;
		}

	for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
		aalg = &dd->pdata->aead_algs_info->algs_list[i];
		crypto_unregister_aead(aalg);
		dd->pdata->aead_algs_info->registered--;
	}

	crypto_engine_exit(dd->engine);

	tasklet_kill(&dd->done_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);

	sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");