// SPDX-License-Identifier: GPL-2.0-or-later
 /* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, const void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

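/*
 * Program one operation into the engine: source and destination physical
 * addresses, length and control flags, then poll the interrupt register
 * until the PENDING bit is set or AES_OP_TIMEOUT iterations elapse.
 * Returns 0 on completion, nonzero on timeout.
 */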
static int
do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}

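/*
 * Perform one hardware request under the global register lock: set the
 * coherency and direction flags, load the IV for CBC mode, load the key,
 * run do_crypt() and, for CBC, read back the updated IV so chained calls
 * continue from the last block.
 */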
static void
geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
		void *dst, u32 len, u8 *iv, int mode, int dir)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	/* If the source and destination are the same, then
	 * we need to turn on the coherent flags, otherwise
	 * we don't need to worry
	 */

	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, iv);
	}

	flags |= AES_CTRL_WRKEY;
	_writefield(AES_WRITEKEY0_REG, tctx->key);

	ret = do_crypt(src, dst, len, flags);
	BUG_ON(ret);

	if (mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, iv);

	spin_unlock_irqrestore(&lock, iflags);
}

/* CRYPTO-API Functions */

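/*
 * The engine only handles 128-bit keys; 192- and 256-bit keys are handed
 * to the software fallback transform by the setkey routines below.
 */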
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int len)
{
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	tctx->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(tctx->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, so fall back
	 */
	tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	tctx->fallback.cip->base.crt_flags |=
		(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
				   CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	unsigned int ret;

	tctx->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(tctx->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, so fall back
	 */
	crypto_skcipher_clear_flags(tctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
	crypto_skcipher_set_flags(tfm,
				  crypto_skcipher_get_flags(tctx->fallback.skcipher) &
				  CRYPTO_TFM_RES_MASK);
	return ret;
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
		return;
	}

	geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
			AES_MODE_ECB, AES_DIR_ENCRYPT);
}


static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
		return;
	}

	geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
			AES_MODE_ECB, AES_DIR_DECRYPT);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	tctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(tctx->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(tctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->fallback.cip);
}

static struct crypto_alg geode_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "geode-aes",
	.cra_priority		= 300,
	.cra_alignmask		= 15,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_tfm_ctx),
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= geode_setkey_cip,
			.cia_encrypt		= geode_encrypt,
			.cia_decrypt		= geode_decrypt
		}
	}
};

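/*
 * Allocate the asynchronous software fallback and reserve enough request
 * context for a fallback sub-request.
 */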
static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	tctx->fallback.skcipher =
		crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(tctx->fallback.skcipher)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(tctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(tctx->fallback.skcipher));
	return 0;
}

static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(tctx->fallback.skcipher);
}

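/*
 * Common ECB/CBC path: hand the whole request to the fallback skcipher
 * when the key is not 128 bits, otherwise walk the request and feed the
 * hardware whole blocks on each pass.
 */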
static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
		if (dir == AES_DIR_DECRYPT)
			return crypto_skcipher_decrypt(subreq);
		else
			return crypto_skcipher_encrypt(subreq);
	}

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
				round_down(nbytes, AES_BLOCK_SIZE),
				walk.iv, mode, dir);
		err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
	}

	return err;
}

static int geode_cbc_encrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}

static int geode_cbc_decrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}

static int geode_ecb_encrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static int geode_ecb_decrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}

static struct skcipher_alg geode_skcipher_algs[] = {
	{
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-geode",
		.base.cra_priority	= 400,
		.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct geode_aes_tfm_ctx),
		.base.cra_alignmask	= 15,
		.base.cra_module	= THIS_MODULE,
		.init			= geode_init_skcipher,
		.exit			= geode_exit_skcipher,
		.setkey			= geode_setkey_skcipher,
		.encrypt		= geode_cbc_encrypt,
		.decrypt		= geode_cbc_decrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	}, {
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-geode",
		.base.cra_priority	= 400,
		.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct geode_aes_tfm_ctx),
		.base.cra_alignmask	= 15,
		.base.cra_module	= THIS_MODULE,
		.init			= geode_init_skcipher,
		.exit			= geode_exit_skcipher,
		.setkey			= geode_setkey_skcipher,
		.encrypt		= geode_ecb_encrypt,
		.decrypt		= geode_ecb_decrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
	},
};

static void geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_skciphers(geode_skcipher_algs,
				    ARRAY_SIZE(geode_skcipher_algs));

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}


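/* Enable the PCI device, map BAR 0, clear any stale interrupt state and
 * register the cipher and skcipher algorithms.
 */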
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_skciphers(geode_skcipher_algs,
					ARRAY_SIZE(geode_skcipher_algs));
	if (ret)
		goto ealg;

	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
	return 0;

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");