Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0481223..eb9782f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -49,7 +49,7 @@
struct bio *bio_out;
struct bvec_iter iter_in;
struct bvec_iter iter_out;
- sector_t cc_sector;
+ u64 cc_sector;
atomic_t cc_pending;
union {
struct skcipher_request *req;
@@ -81,7 +81,7 @@
struct convert_context *ctx;
struct scatterlist sg_in[4];
struct scatterlist sg_out[4];
- sector_t iv_sector;
+ u64 iv_sector;
};
struct crypt_config;
@@ -98,11 +98,6 @@
struct dm_crypt_request *dmreq);
};
-struct iv_essiv_private {
- struct crypto_shash *hash_tfm;
- u8 *salt;
-};
-
struct iv_benbi_private {
int shift;
};
@@ -148,25 +143,21 @@
struct task_struct *write_thread;
struct rb_root write_tree;
- char *cipher;
char *cipher_string;
char *cipher_auth;
char *key_string;
const struct crypt_iv_operations *iv_gen_ops;
union {
- struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
struct iv_lmk_private lmk;
struct iv_tcw_private tcw;
} iv_gen_private;
- sector_t iv_offset;
+ u64 iv_offset;
unsigned int iv_size;
unsigned short int sector_size;
unsigned char sector_shift;
- /* ESSIV: struct crypto_cipher *essiv_tfm */
- void *iv_private;
union {
struct crypto_skcipher **tfms;
struct crypto_aead **tfms_aead;
@@ -291,8 +282,9 @@
* Note that this encryption scheme is vulnerable to watermarking attacks
* and should be used for old compatible containers access only.
*
- * plumb: unimplemented, see:
- * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
+ * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
+ * The IV is the little-endian byte offset of the sector, encrypted
+ * with the same key and cipher as the volume.
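+ * (E.g. with 512-byte sectors, sector 8 gets E_K(le64(4096)) as its
+ * IV, zero-padded to the cipher block size.)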
*/
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
@@ -323,158 +315,15 @@
return 0;
}
-/* Initialise ESSIV - compute salt but no local memory allocations */
-static int crypt_iv_essiv_init(struct crypt_config *cc)
-{
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- SHASH_DESC_ON_STACK(desc, essiv->hash_tfm);
- struct crypto_cipher *essiv_tfm;
- int err;
-
- desc->tfm = essiv->hash_tfm;
- desc->flags = 0;
-
- err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
- shash_desc_zero(desc);
- if (err)
- return err;
-
- essiv_tfm = cc->iv_private;
-
- err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
- crypto_shash_digestsize(essiv->hash_tfm));
- if (err)
- return err;
-
- return 0;
-}
-
-/* Wipe salt and reset key derived from volume key */
-static int crypt_iv_essiv_wipe(struct crypt_config *cc)
-{
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm);
- struct crypto_cipher *essiv_tfm;
- int r, err = 0;
-
- memset(essiv->salt, 0, salt_size);
-
- essiv_tfm = cc->iv_private;
- r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
- if (r)
- err = r;
-
- return err;
-}
-
-/* Allocate the cipher for ESSIV */
-static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
- struct dm_target *ti,
- const u8 *salt,
- unsigned int saltsize)
-{
- struct crypto_cipher *essiv_tfm;
- int err;
-
- /* Setup the essiv_tfm with the given salt */
- essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(essiv_tfm)) {
- ti->error = "Error allocating crypto tfm for ESSIV";
- return essiv_tfm;
- }
-
- if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
- ti->error = "Block size of ESSIV cipher does "
- "not match IV size of block cipher";
- crypto_free_cipher(essiv_tfm);
- return ERR_PTR(-EINVAL);
- }
-
- err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
- if (err) {
- ti->error = "Failed to set key for ESSIV cipher";
- crypto_free_cipher(essiv_tfm);
- return ERR_PTR(err);
- }
-
- return essiv_tfm;
-}
-
-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
-{
- struct crypto_cipher *essiv_tfm;
- struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-
- crypto_free_shash(essiv->hash_tfm);
- essiv->hash_tfm = NULL;
-
- kzfree(essiv->salt);
- essiv->salt = NULL;
-
- essiv_tfm = cc->iv_private;
-
- if (essiv_tfm)
- crypto_free_cipher(essiv_tfm);
-
- cc->iv_private = NULL;
-}
-
-static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
- const char *opts)
-{
- struct crypto_cipher *essiv_tfm = NULL;
- struct crypto_shash *hash_tfm = NULL;
- u8 *salt = NULL;
- int err;
-
- if (!opts) {
- ti->error = "Digest algorithm missing for ESSIV mode";
- return -EINVAL;
- }
-
- /* Allocate hash algorithm */
- hash_tfm = crypto_alloc_shash(opts, 0, 0);
- if (IS_ERR(hash_tfm)) {
- ti->error = "Error initializing ESSIV hash";
- err = PTR_ERR(hash_tfm);
- goto bad;
- }
-
- salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL);
- if (!salt) {
- ti->error = "Error kmallocing salt storage in ESSIV";
- err = -ENOMEM;
- goto bad;
- }
-
- cc->iv_gen_private.essiv.salt = salt;
- cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
-
- essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
- crypto_shash_digestsize(hash_tfm));
- if (IS_ERR(essiv_tfm)) {
- crypt_iv_essiv_dtr(cc);
- return PTR_ERR(essiv_tfm);
- }
- cc->iv_private = essiv_tfm;
-
- return 0;
-
-bad:
- if (hash_tfm && !IS_ERR(hash_tfm))
- crypto_free_shash(hash_tfm);
- kfree(salt);
- return err;
-}
-
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
- struct crypto_cipher *essiv_tfm = cc->iv_private;
-
+ /*
+ * ESSIV encryption of the IV is now handled by the crypto API,
+ * so just pass the plain sector number here.
+ */
memset(iv, 0, cc->iv_size);
*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
- crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
return 0;
}
@@ -606,7 +455,6 @@
int i, r;
desc->tfm = lmk->hash_tfm;
- desc->flags = 0;
r = crypto_shash_init(desc);
if (r)
@@ -768,7 +616,6 @@
/* calculate crc32 for every 32bit part and xor it */
desc->tfm = tcw->crc32_tfm;
- desc->flags = 0;
for (i = 0; i < 4; i++) {
r = crypto_shash_init(desc);
if (r)
@@ -844,6 +691,49 @@
return 0;
}
+static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) {
+ ti->error = "AEAD transforms not supported for EBOIV";
+ return -EINVAL;
+ }
+
+ if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
+ ti->error = "Block size of EBOIV cipher does "
+ "not match IV size of block cipher";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
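+/*
+ * Encrypting a single zero block in CBC mode with the sector's
+ * little-endian byte offset as the IV yields E_K(offset), which is
+ * the EBOIV value; the volume key and cipher are reused, so no
+ * separate tfm is needed.
+ */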
+static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
+ struct skcipher_request *req;
+ struct scatterlist src, dst;
+ struct crypto_wait wait;
+ int err;
+
+ req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
+ if (!req)
+ return -ENOMEM;
+
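+ /* the IV fed to the cipher is the sector's byte offset, little-endian */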
+ memset(buf, 0, cc->iv_size);
+ *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
+
+ sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
+ sg_init_one(&dst, iv, cc->iv_size);
+ skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ skcipher_request_free(req);
+
+ return err;
+}
+
static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
@@ -857,10 +747,6 @@
};
static const struct crypt_iv_operations crypt_iv_essiv_ops = {
- .ctr = crypt_iv_essiv_ctr,
- .dtr = crypt_iv_essiv_dtr,
- .init = crypt_iv_essiv_init,
- .wipe = crypt_iv_essiv_wipe,
.generator = crypt_iv_essiv_gen
};
@@ -896,6 +782,11 @@
.generator = crypt_iv_random_gen
};
+static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
+ .ctr = crypt_iv_eboiv_ctr,
+ .generator = crypt_iv_eboiv_gen
+};
+
/*
* Integrity extensions
*/
@@ -932,7 +823,7 @@
if (IS_ERR(bip))
return PTR_ERR(bip);
- tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
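+ /* integrity tags are per crypto sector (cc->sector_size), not per 512-byte bio sector */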
+ tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
bip->bip_iter.bi_size = tag_len;
bip->bip_iter.bi_sector = io->cc->start + io->sector;
@@ -949,6 +840,7 @@
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
+ struct mapped_device *md = dm_table_get_md(ti->table);
/* From now we require underlying device with our integrity profile */
if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
@@ -968,7 +860,7 @@
if (crypt_integrity_aead(cc)) {
cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
- DMINFO("Integrity AEAD, tag size %u, IV size %u.",
+ DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
cc->integrity_tag_size, cc->integrity_iv_size);
if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
@@ -976,7 +868,7 @@
return -EINVAL;
}
} else if (cc->integrity_iv_size)
- DMINFO("Additional per-sector space %u bytes for IV.",
+ DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
cc->integrity_iv_size);
if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
@@ -1034,11 +926,11 @@
return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}
-static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
+static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
- return (uint64_t*) ptr;
+ return (__le64 *) ptr;
}
static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
@@ -1074,7 +966,7 @@
struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct dm_crypt_request *dmreq;
u8 *iv, *org_iv, *tag_iv, *tag;
- uint64_t *sector;
+ __le64 *sector;
int r = 0;
BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
@@ -1146,9 +1038,11 @@
r = crypto_aead_decrypt(req);
}
- if (r == -EBADMSG)
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+ if (r == -EBADMSG) {
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
(unsigned long long)le64_to_cpu(*sector));
+ }
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
@@ -1169,7 +1063,7 @@
struct scatterlist *sg_in, *sg_out;
struct dm_crypt_request *dmreq;
u8 *iv, *org_iv, *tag_iv;
- uint64_t *sector;
+ __le64 *sector;
int r = 0;
/* Reject unexpected unaligned bio. */
@@ -1445,10 +1339,10 @@
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
- unsigned int i;
struct bio_vec *bv;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bv, clone, i) {
+ bio_for_each_segment_all(bv, clone, iter_all) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, &cc->page_pool);
}
@@ -1791,7 +1685,8 @@
error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
if (error == -EBADMSG) {
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
(unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
io->error = BLK_STS_PROTECTION;
} else if (error < 0)
@@ -1885,6 +1780,13 @@
}
}
+ /*
+ * dm-crypt performance can vary greatly depending on which crypto
+ * algorithm implementation is used. Help people debug performance
+ * problems by logging the ->cra_driver_name.
+ */
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
+ crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
return 0;
}
@@ -1903,6 +1805,8 @@
return err;
}
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
+ crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
return 0;
}
@@ -2148,6 +2052,14 @@
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
get_random_bytes(&cc->key, cc->key_size);
+
+ /* Wipe IV private keys */
+ if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
+ r = cc->iv_gen_ops->wipe(cc);
+ if (r)
+ return r;
+ }
+
kzfree(cc->key_string);
cc->key_string = NULL;
r = crypt_setkey(cc);
@@ -2158,7 +2070,7 @@
static void crypt_calculate_pages_per_client(void)
{
- unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;
+ unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
if (!dm_crypt_clients_n)
return;
@@ -2227,7 +2139,6 @@
if (cc->dev)
dm_put_device(ti, cc->dev);
- kzfree(cc->cipher);
kzfree(cc->cipher_string);
kzfree(cc->key_string);
kzfree(cc->cipher_auth);
@@ -2278,6 +2189,8 @@
cc->iv_gen_ops = &crypt_iv_benbi_ops;
else if (strcmp(ivmode, "null") == 0)
cc->iv_gen_ops = &crypt_iv_null_ops;
+ else if (strcmp(ivmode, "eboiv") == 0)
+ cc->iv_gen_ops = &crypt_iv_eboiv_ops;
else if (strcmp(ivmode, "lmk") == 0) {
cc->iv_gen_ops = &crypt_iv_lmk_ops;
/*
@@ -2307,52 +2220,6 @@
}
/*
- * Workaround to parse cipher algorithm from crypto API spec.
- * The cc->cipher is currently used only in ESSIV.
- * This should be probably done by crypto-api calls (once available...)
- */
-static int crypt_ctr_blkdev_cipher(struct crypt_config *cc)
-{
- const char *alg_name = NULL;
- char *start, *end;
-
- if (crypt_integrity_aead(cc)) {
- alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc)));
- if (!alg_name)
- return -EINVAL;
- if (crypt_integrity_hmac(cc)) {
- alg_name = strchr(alg_name, ',');
- if (!alg_name)
- return -EINVAL;
- }
- alg_name++;
- } else {
- alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc)));
- if (!alg_name)
- return -EINVAL;
- }
-
- start = strchr(alg_name, '(');
- end = strchr(alg_name, ')');
-
- if (!start && !end) {
- cc->cipher = kstrdup(alg_name, GFP_KERNEL);
- return cc->cipher ? 0 : -ENOMEM;
- }
-
- if (!start || !end || ++start >= end)
- return -EINVAL;
-
- cc->cipher = kzalloc(end - start + 1, GFP_KERNEL);
- if (!cc->cipher)
- return -ENOMEM;
-
- strncpy(cc->cipher, start, end - start);
-
- return 0;
-}
-
-/*
* Workaround to parse HMAC algorithm from AEAD crypto API spec.
* The HMAC is needed to calculate tag size (HMAC digest size).
* This should be probably done by crypto-api calls (once available...)
@@ -2395,7 +2262,7 @@
char **ivmode, char **ivopts)
{
struct crypt_config *cc = ti->private;
- char *tmp, *cipher_api;
+ char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
int ret = -EINVAL;
cc->tfms_count = 1;
@@ -2405,13 +2272,48 @@
* capi:cipher_api_spec-iv:ivopts
*/
tmp = &cipher_in[strlen("capi:")];
- cipher_api = strsep(&tmp, "-");
- *ivmode = strsep(&tmp, ":");
- *ivopts = tmp;
+
+ /* Separate IV options if present, it can contain another '-' in hash name */
+ *ivopts = strrchr(tmp, ':');
+ if (*ivopts) {
+ **ivopts = '\0';
+ (*ivopts)++;
+ }
+ /* Parse IV mode */
+ *ivmode = strrchr(tmp, '-');
+ if (*ivmode) {
+ **ivmode = '\0';
+ (*ivmode)++;
+ }
+ /* The rest is crypto API spec */
+ cipher_api = tmp;
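+ /* e.g. "cbc(aes)-essiv:sha3-256" yields cipher_api="cbc(aes)",
+ * ivmode="essiv", ivopts="sha3-256"; parsing from the right keeps
+ * the '-' inside "sha3-256" out of the IV mode. */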
+
+ /* Alloc AEAD, can be used only in new format. */
+ if (crypt_integrity_aead(cc)) {
+ ret = crypt_ctr_auth_cipher(cc, cipher_api);
+ if (ret < 0) {
+ ti->error = "Invalid AEAD cipher spec";
+ return -ENOMEM;
+ }
+ }
if (*ivmode && !strcmp(*ivmode, "lmk"))
cc->tfms_count = 64;
+ if (*ivmode && !strcmp(*ivmode, "essiv")) {
+ if (!*ivopts) {
+ ti->error = "Digest algorithm missing for ESSIV mode";
+ return -EINVAL;
+ }
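+ /* wrap the cipher in the crypto API essiv template,
+ * e.g. "essiv(cbc(aes),sha256)" */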
+ ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
+ cipher_api, *ivopts);
+ if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
+ ti->error = "Cannot allocate cipher string";
+ return -ENOMEM;
+ }
+ cipher_api = buf;
+ }
+
cc->key_parts = cc->tfms_count;
/* Allocate cipher */
@@ -2421,23 +2323,11 @@
return ret;
}
- /* Alloc AEAD, can be used only in new format. */
- if (crypt_integrity_aead(cc)) {
- ret = crypt_ctr_auth_cipher(cc, cipher_api);
- if (ret < 0) {
- ti->error = "Invalid AEAD cipher spec";
- return -ENOMEM;
- }
+ if (crypt_integrity_aead(cc))
cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
- } else
+ else
cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
- ret = crypt_ctr_blkdev_cipher(cc);
- if (ret < 0) {
- ti->error = "Cannot allocate cipher string";
- return -ENOMEM;
- }
-
return 0;
}
@@ -2472,16 +2362,9 @@
}
cc->key_parts = cc->tfms_count;
- cc->cipher = kstrdup(cipher, GFP_KERNEL);
- if (!cc->cipher)
- goto bad_mem;
-
chainmode = strsep(&tmp, "-");
- *ivopts = strsep(&tmp, "-");
- *ivmode = strsep(&*ivopts, ":");
-
- if (tmp)
- DMWARN("Ignoring unexpected additional cipher options");
+ *ivmode = strsep(&tmp, ":");
+ *ivopts = tmp;
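+ /* e.g. "aes-cbc-essiv:sha3-256" -> chainmode="cbc", ivmode="essiv",
+ * ivopts="sha3-256"; splitting on ':' rather than '-' keeps hash
+ * names like "sha3-256" intact */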
/*
* For compatibility with the original dm-crypt mapping format, if
@@ -2501,9 +2384,19 @@
if (!cipher_api)
goto bad_mem;
- ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
- "%s(%s)", chainmode, cipher);
- if (ret < 0) {
+ if (*ivmode && !strcmp(*ivmode, "essiv")) {
+ if (!*ivopts) {
+ ti->error = "Digest algorithm missing for ESSIV mode";
+ kfree(cipher_api);
+ return -EINVAL;
+ }
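+ /* e.g. cipher="aes", chainmode="cbc", ivopts="sha256"
+ * -> "essiv(cbc(aes),sha256)" */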
+ ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
+ "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
+ } else {
+ ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", chainmode, cipher);
+ }
+ if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
kfree(cipher_api);
goto bad_mem;
}
@@ -2661,6 +2554,7 @@
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
+ const char *devname = dm_table_device_name(ti->table);
int key_size;
unsigned int align_mask;
unsigned long long tmpll;
@@ -2679,7 +2573,7 @@
return -EINVAL;
}
- cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
+ cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
if (!cc) {
ti->error = "Cannot allocate encryption context";
return -ENOMEM;
@@ -2780,7 +2674,7 @@
}
ret = -EINVAL;
- if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
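+ /* the offset is parsed as u64 but must also fit in sector_t */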
+ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
ti->error = "Invalid device sector";
goto bad;
}
@@ -2806,18 +2700,19 @@
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
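+ /* name the workqueues after the device so per-device kworkers are
+ * identifiable when debugging */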
+ cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ 1, devname);
else
- cc->crypt_queue = alloc_workqueue("kcryptd",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
- num_online_cpus());
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ num_online_cpus(), devname);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
@@ -2826,7 +2721,7 @@
spin_lock_init(&cc->write_thread_lock);
cc->write_tree = RB_ROOT;
- cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
+ cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
if (IS_ERR(cc->write_thread)) {
ret = PTR_ERR(cc->write_thread);
cc->write_thread = NULL;
@@ -3026,14 +2921,8 @@
memset(cc->key, 0, cc->key_size * sizeof(u8));
return ret;
}
- if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
- if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
- ret = cc->iv_gen_ops->wipe(cc);
- if (ret)
- return ret;
- }
+ if (argc == 2 && !strcasecmp(argv[1], "wipe"))
return crypt_wipe_key(cc);
- }
}
error:
@@ -3070,7 +2959,7 @@
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 18, 1},
+ .version = {1, 19, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,