// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

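/*
 * Page-sized buffer of pre-computed counter blocks for CTR mode, shared
 * by all tfms and serialized by ctrblk_lock (see ctr_aes_crypt below).
 */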
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

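/*
 * Per-tfm context: the raw key material, the CPACF function code chosen
 * at setkey time (0 if the CPU lacks the needed facility), and the
 * software fallback tfm used in that case.
 */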
struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_sync_skcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

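/*
 * XTS context: the 2*key_len input key is split in half at setkey time,
 * the first half (key) for the data-encryption operation and the second
 * half (pcc_key) for deriving the initial tweak.
 */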
struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_sync_skcipher *fallback;
};

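/*
 * Scatterlist walk state for GCM: tracks the currently mapped chunk
 * and, for chunks smaller than one AES block, collects bytes in the
 * bounce buffer (buf) until a full block is available.
 */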
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

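/*
 * All setkey routines below follow the same pattern: map the key length
 * to a CPACF function code, test the queried facility mask for it, and
 * fall back to the software implementation if the CPU lacks it.
 */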
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-s390",
	.cra_priority		=	300,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_cip,
	.cra_exit		=	fallback_exit_cip,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	crypto_aes_encrypt,
			.cia_decrypt		=	crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
					 CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-s390",
	.cra_priority		=	401,	/* combo: aes + ecb + 1 */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	ecb_aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	ret = blkcipher_walk_virt(desc, walk);
	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-s390",
	.cra_priority		=	402,	/* ecb-aes-s390 + 1 */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	cbc_aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
					 CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

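/*
 * The initial tweak is derived with the CPACF PCC instruction: pcc_key
 * and the IV go into pcc_param, and the XTS parameter it computes in
 * pcc_param.xts seeds xts_param.init for the KM-XTS operation below.
 */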
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	ret = blkcipher_walk_virt(desc, walk);
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (!nbytes)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (!nbytes)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
						  CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
	.cra_name		=	"xts(aes)",
	.cra_driver_name	=	"xts-aes-s390",
	.cra_priority		=	402,	/* ecb-aes-s390 + 1 */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_xts_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	xts_fallback_init,
	.cra_exit		=	xts_fallback_exit,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	2 * AES_MIN_KEY_SIZE,
			.max_keysize		=	2 * AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	xts_aes_set_key,
			.encrypt		=	xts_aes_encrypt,
			.decrypt		=	xts_aes_decrypt,
		}
	}
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

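/*
 * Fill the shared ctrblk page with consecutive counter values so that
 * a single cpacf_kmctr call can process up to PAGE_SIZE bytes at once.
 * Returns the number of bytes' worth of counters prepared.
 */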
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

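/*
 * If ctrblk_lock cannot be taken, fall back to processing one block at
 * a time, using walk->iv directly as the counter instead of the shared
 * ctrblk buffer.
 */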
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = mutex_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    walk->dst.virt.addr, walk->src.virt.addr,
			    n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    buf, walk->src.virt.addr,
			    AES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name		=	"ctr(aes)",
	.cra_driver_name	=	"ctr-aes-s390",
	.cra_priority		=	402,	/* ecb-aes-s390 + 1 */
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	1,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_blk,
	.cra_exit		=	fallback_exit_blk,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	ctr_aes_set_key,
			.encrypt		=	ctr_aes_encrypt,
			.decrypt		=	ctr_aes_decrypt,
		}
	}
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

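/*
 * Scatterlist walk helpers for the KMA loop: clamp the walk to the
 * current sg entry, map it, and unmap/advance once a chunk has been
 * consumed.
 */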
static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(&gw->walk);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

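/*
 * Process the request in chunks sized by whatever both sg walks can
 * provide at once; CPACF_KMA_LAAD and CPACF_KMA_LPC are set once the
 * remaining AAD resp. plain-/ciphertext fits into the current chunk.
 */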
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
	int ret;

	ret = crypto_register_alg(alg);
	if (!ret)
		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	while (aes_s390_algs_num--)
		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = aes_s390_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		ret = aes_s390_register_alg(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_alg(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_alg(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_alg(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");