From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 31797
CVE: NA
-------------------------
The next two patches to be reverted conflict with this patch. Revert this patch first, then merge the original patch.
This reverts commit 808f3768deec5e1a5c9d2a4a2d8593fbbaf3e4cc.
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: ZhangXiaoxu <zhangxiaoxu5@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/md/dm-crypt.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d451f98..aa7f741 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -563,14 +563,8 @@ static int crypt_iv_essiv_gen(struct geniv_ctx *ctx,
 static int crypt_iv_benbi_ctr(struct geniv_ctx *ctx)
 {
-	unsigned bs;
-	int log;
-
-	if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &ctx->cipher_flags))
-		bs = crypto_aead_blocksize(ctx->tfms.tfms_aead[0]);
-	else
-		bs = crypto_skcipher_blocksize(ctx->tfms.tfms[0]);
-	log = ilog2(bs);
+	unsigned int bs = crypto_skcipher_blocksize(ctx->tfms.tfms[0]);
+	int log = ilog2(bs);
 
 	/* we need to calculate how far we must shift the sector count
 	 * to get the cipher block count, we use this shift in _gen */
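For reference, the shift stored by crypt_iv_benbi_ctr() is simply 9 - ilog2(block size): how many cipher blocks make up one 512-byte sector. A minimal userspace sketch of that arithmetic, assuming a 16-byte block cipher such as AES (illustrative only, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bs = 16;	/* assumed cipher block size in bytes */
		int log = 0;

		while ((1u << (log + 1)) <= bs)	/* open-coded ilog2(bs) */
			log++;

		/* benbi shift: 512-byte sector count -> cipher block count */
		printf("shift = %d, blocks per sector = %u\n",
		       9 - log, 1u << (9 - log));
		return 0;
	}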
From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 31797
CVE: NA
------------------------
We came across a KASAN double-free issue that seems to be related to this patch. Let's revert this patch for now.
This reverts commit 865258a05a1e141f8d77f347d34e056013cbf61f.
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: ZhangXiaoxu <zhangxiaoxu5@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/md/dm-crypt.c | 1223 +++++++++++++++++++++++++++++++------------------
 1 file changed, 771 insertions(+), 452 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index aa7f741..82060f1 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -126,14 +126,26 @@ struct dm_crypt_io {
struct dm_crypt_request { struct convert_context *ctx; - struct scatterlist *sg_in; - struct scatterlist *sg_out; + struct scatterlist sg_in[4]; + struct scatterlist sg_out[4]; u64 iv_sector; };
struct crypt_config;
struct crypt_iv_operations { + int (*ctr)(struct crypt_config *cc, struct dm_target *ti, + const char *opts); + void (*dtr)(struct crypt_config *cc); + int (*init)(struct crypt_config *cc); + int (*wipe)(struct crypt_config *cc); + int (*generator)(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq); + int (*post)(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq); +}; + +struct crypt_geniv_operations { int (*ctr)(struct geniv_ctx *ctx); void (*dtr)(struct geniv_ctx *ctx); int (*init)(struct geniv_ctx *ctx); @@ -196,10 +208,18 @@ struct crypt_config { struct task_struct *write_thread; struct rb_root write_tree;
+ char *cipher; char *cipher_string; char *cipher_auth; char *key_string;
+ const struct crypt_iv_operations *iv_gen_ops; + union { + struct iv_essiv_private essiv; + struct iv_benbi_private benbi; + struct iv_lmk_private lmk; + struct iv_tcw_private tcw; + } iv_gen_private; u64 iv_offset; unsigned int iv_size; unsigned short int sector_size; @@ -208,10 +228,10 @@ struct crypt_config { /* ESSIV: struct crypto_cipher *essiv_tfm */ void *iv_private; union { - struct crypto_skcipher *tfm; - struct crypto_aead *tfm_aead; + struct crypto_skcipher **tfms; + struct crypto_aead **tfms_aead; } cipher_tfm; - unsigned int tfms_count; + unsigned tfms_count; unsigned long cipher_flags;
/* @@ -253,6 +273,7 @@ struct crypt_config { struct bio_set bs; struct mutex bio_alloc_lock;
+ u8 *authenc_key; /* space for keys in authenc() format (if used) */ u8 key[0]; };
@@ -260,7 +281,6 @@ struct crypt_config { #define MAX_TAG_SIZE 480 #define POOL_ENTRY_SIZE 512 #define SECTOR_MASK ((1 << SECTOR_SHIFT) - 1) -#define MAX_SG_LIST (BIO_MAX_PAGES * 8)
static DEFINE_SPINLOCK(dm_crypt_clients_lock); static unsigned dm_crypt_clients_n = 0; @@ -270,7 +290,7 @@ struct crypt_config {
static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); -static struct scatterlist *crypt_get_sg_data(struct geniv_ctx *ctx, +static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, struct scatterlist *sg);
/* @@ -278,12 +298,12 @@ static struct scatterlist *crypt_get_sg_data(struct geniv_ctx *ctx, */ static struct crypto_skcipher *any_tfm(struct crypt_config *cc) { - return cc->cipher_tfm.tfm; + return cc->cipher_tfm.tfms[0]; }
static struct crypto_aead *any_tfm_aead(struct crypt_config *cc) { - return cc->cipher_tfm.tfm_aead; + return cc->cipher_tfm.tfms_aead[0]; }
/* context of geniv tfm */ @@ -311,7 +331,7 @@ struct geniv_ctx { char *ciphermode; unsigned long cipher_flags;
- const struct crypt_iv_operations *iv_gen_ops; + const struct crypt_geniv_operations *iv_gen_ops; union { struct iv_essiv_private essiv; struct iv_benbi_private benbi; @@ -385,41 +405,38 @@ struct geniv_ctx { * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 */
-static int crypt_iv_plain_gen(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { - memset(iv, 0, ctx->iv_size); - *(__le32 *)iv = cpu_to_le32(subreq->iv_sector & 0xffffffff); + memset(iv, 0, cc->iv_size); + *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
return 0; }
-static int crypt_iv_plain64_gen(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { - memset(iv, 0, ctx->iv_size); - *(__le64 *)iv = cpu_to_le64(subreq->iv_sector); + memset(iv, 0, cc->iv_size); + *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
return 0; }
-static int crypt_iv_plain64be_gen(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { - memset(iv, 0, ctx->iv_size); + memset(iv, 0, cc->iv_size); /* iv_size is at least of size u64; usually it is 16 bytes */ - *(__be64 *)&iv[ctx->iv_size - sizeof(u64)] = cpu_to_be64(subreq->iv_sector); + *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
return 0; }
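A concrete illustration of the two 64-bit variants above: plain64 stores the sector number little-endian at the start of the IV, plain64be stores it big-endian at the end. A minimal userspace sketch with example values (16-byte IV and sector 0x12345678 are assumptions, not taken from the patch):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint64_t sector = 0x12345678;	/* example sector number */
		uint8_t iv[16];
		int i;

		/* plain64: little-endian sector in the first 8 bytes */
		memset(iv, 0, sizeof(iv));
		memcpy(iv, &sector, sizeof(sector));	/* assumes a little-endian host */
		for (i = 0; i < 16; i++)
			printf("%02x ", iv[i]);
		printf("(plain64)\n");

		/* plain64be: big-endian sector in the last 8 bytes */
		memset(iv, 0, sizeof(iv));
		for (i = 0; i < 8; i++)
			iv[15 - i] = (uint8_t)(sector >> (8 * i));
		for (i = 0; i < 16; i++)
			printf("%02x ", iv[i]);
		printf("(plain64be)\n");
		return 0;
	}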
/* Initialise ESSIV - compute salt but no local memory allocations */ -static int crypt_iv_essiv_init(struct geniv_ctx *ctx) +static int crypt_iv_essiv_init(struct crypt_config *cc) { - struct iv_essiv_private *essiv = &ctx->iv_gen_private.essiv; + struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; SHASH_DESC_ON_STACK(desc, essiv->hash_tfm); struct crypto_cipher *essiv_tfm; int err; @@ -427,54 +444,65 @@ static int crypt_iv_essiv_init(struct geniv_ctx *ctx) desc->tfm = essiv->hash_tfm; desc->flags = 0;
- err = crypto_shash_digest(desc, ctx->key, ctx->key_size, essiv->salt); + err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); shash_desc_zero(desc); if (err) return err;
- essiv_tfm = ctx->iv_private; + essiv_tfm = cc->iv_private;
- return crypto_cipher_setkey(essiv_tfm, essiv->salt, + err = crypto_cipher_setkey(essiv_tfm, essiv->salt, crypto_shash_digestsize(essiv->hash_tfm)); + if (err) + return err; + + return 0; }
/* Wipe salt and reset key derived from volume key */ -static int crypt_iv_essiv_wipe(struct geniv_ctx *ctx) +static int crypt_iv_essiv_wipe(struct crypt_config *cc) { - struct iv_essiv_private *essiv = &ctx->iv_gen_private.essiv; - unsigned int salt_size = crypto_shash_digestsize(essiv->hash_tfm); + struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; + unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm); struct crypto_cipher *essiv_tfm; + int r, err = 0;
memset(essiv->salt, 0, salt_size);
- essiv_tfm = ctx->iv_private; - return crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); + essiv_tfm = cc->iv_private; + r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); + if (r) + err = r; + + return err; }
/* Allocate the cipher for ESSIV */ -static struct crypto_cipher *alloc_essiv_cipher(struct geniv_ctx *ctx, - u8 *salt, unsigned int saltsize) +static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc, + struct dm_target *ti, + const u8 *salt, + unsigned int saltsize) { struct crypto_cipher *essiv_tfm; int err;
/* Setup the essiv_tfm with the given salt */ - essiv_tfm = crypto_alloc_cipher(ctx->cipher, 0, CRYPTO_ALG_ASYNC); + essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(essiv_tfm)) { - DMERR("Error allocating crypto tfm for ESSIV\n"); + ti->error = "Error allocating crypto tfm for ESSIV"; return essiv_tfm; }
- if (crypto_cipher_blocksize(essiv_tfm) != ctx->iv_size) { - DMERR("Block size of ESSIV cipher does " - "not match IV size of block cipher\n"); + if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) { + ti->error = "Block size of ESSIV cipher does " + "not match IV size of block cipher"; crypto_free_cipher(essiv_tfm); return ERR_PTR(-EINVAL); }
err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); if (err) { - DMERR("Failed to set key for ESSIV cipher\n"); + ti->error = "Failed to set key for ESSIV cipher"; crypto_free_cipher(essiv_tfm); return ERR_PTR(err); } @@ -482,10 +510,10 @@ static struct crypto_cipher *alloc_essiv_cipher(struct geniv_ctx *ctx, return essiv_tfm; }
-static void crypt_iv_essiv_dtr(struct geniv_ctx *ctx) +static void crypt_iv_essiv_dtr(struct crypt_config *cc) { struct crypto_cipher *essiv_tfm; - struct iv_essiv_private *essiv = &ctx->iv_gen_private.essiv; + struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
crypto_free_shash(essiv->hash_tfm); essiv->hash_tfm = NULL; @@ -493,51 +521,52 @@ static void crypt_iv_essiv_dtr(struct geniv_ctx *ctx) kzfree(essiv->salt); essiv->salt = NULL;
- essiv_tfm = ctx->iv_private; + essiv_tfm = cc->iv_private;
if (essiv_tfm) crypto_free_cipher(essiv_tfm);
- ctx->iv_private = NULL; + cc->iv_private = NULL; }
-static int crypt_iv_essiv_ctr(struct geniv_ctx *ctx) +static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) { struct crypto_cipher *essiv_tfm = NULL; struct crypto_shash *hash_tfm = NULL; u8 *salt = NULL; int err;
- if (!ctx->ivopts) { - DMERR("Digest algorithm missing for ESSIV mode\n"); + if (!opts) { + ti->error = "Digest algorithm missing for ESSIV mode"; return -EINVAL; }
/* Allocate hash algorithm */ - hash_tfm = crypto_alloc_shash(ctx->ivopts, 0, 0); + hash_tfm = crypto_alloc_shash(opts, 0, 0); if (IS_ERR(hash_tfm)) { - DMERR("Error initializing ESSIV hash\n"); + ti->error = "Error initializing ESSIV hash"; err = PTR_ERR(hash_tfm); goto bad; }
salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL); if (!salt) { - DMERR("Error kmallocing salt storage in ESSIV\n"); + ti->error = "Error kmallocing salt storage in ESSIV"; err = -ENOMEM; goto bad; }
- ctx->iv_gen_private.essiv.salt = salt; - ctx->iv_gen_private.essiv.hash_tfm = hash_tfm; + cc->iv_gen_private.essiv.salt = salt; + cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
- essiv_tfm = alloc_essiv_cipher(ctx, salt, + essiv_tfm = alloc_essiv_cipher(cc, ti, salt, crypto_shash_digestsize(hash_tfm)); if (IS_ERR(essiv_tfm)) { - crypt_iv_essiv_dtr(ctx); + crypt_iv_essiv_dtr(cc); return PTR_ERR(essiv_tfm); } - ctx->iv_private = essiv_tfm; + cc->iv_private = essiv_tfm;
return 0;
@@ -548,72 +577,70 @@ static int crypt_iv_essiv_ctr(struct geniv_ctx *ctx) return err; }
-static int crypt_iv_essiv_gen(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { - struct crypto_cipher *essiv_tfm = ctx->iv_private; + struct crypto_cipher *essiv_tfm = cc->iv_private;
- memset(iv, 0, ctx->iv_size); - *(__le64 *)iv = cpu_to_le64(subreq->iv_sector); + memset(iv, 0, cc->iv_size); + *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
return 0; }
-static int crypt_iv_benbi_ctr(struct geniv_ctx *ctx) +static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) { - unsigned int bs = crypto_skcipher_blocksize(ctx->tfms.tfms[0]); + unsigned bs = crypto_skcipher_blocksize(any_tfm(cc)); int log = ilog2(bs);
/* we need to calculate how far we must shift the sector count * to get the cipher block count, we use this shift in _gen */
if (1 << log != bs) { - DMERR("cypher blocksize is not a power of 2\n"); + ti->error = "cypher blocksize is not a power of 2"; return -EINVAL; }
if (log > 9) { - DMERR("cypher blocksize is > 512\n"); + ti->error = "cypher blocksize is > 512"; return -EINVAL; }
- ctx->iv_gen_private.benbi.shift = 9 - log; + cc->iv_gen_private.benbi.shift = 9 - log;
return 0; }
-static void crypt_iv_benbi_dtr(struct geniv_ctx *ctx) +static void crypt_iv_benbi_dtr(struct crypt_config *cc) { }
-static int crypt_iv_benbi_gen(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { __be64 val;
- memset(iv, 0, ctx->iv_size - sizeof(u64)); /* rest is cleared below */ + memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
- val = cpu_to_be64(((u64)subreq->iv_sector << ctx->iv_gen_private.benbi.shift) + 1); - put_unaligned(val, (__be64 *)(iv + ctx->iv_size - sizeof(u64))); + val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1); + put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
return 0; }
-static int crypt_iv_null_gen(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { - memset(iv, 0, ctx->iv_size); + memset(iv, 0, cc->iv_size);
return 0; }
-static void crypt_iv_lmk_dtr(struct geniv_ctx *ctx) +static void crypt_iv_lmk_dtr(struct crypt_config *cc) { - struct iv_lmk_private *lmk = &ctx->iv_gen_private.lmk; + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) crypto_free_shash(lmk->hash_tfm); @@ -623,54 +650,54 @@ static void crypt_iv_lmk_dtr(struct geniv_ctx *ctx) lmk->seed = NULL; }
-static int crypt_iv_lmk_ctr(struct geniv_ctx *ctx) +static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) { - struct iv_lmk_private *lmk = &ctx->iv_gen_private.lmk; + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
- if (ctx->sector_size != (1 << SECTOR_SHIFT)) { - DMERR("Unsupported sector size for LMK\n"); + if (cc->sector_size != (1 << SECTOR_SHIFT)) { + ti->error = "Unsupported sector size for LMK"; return -EINVAL; }
lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); if (IS_ERR(lmk->hash_tfm)) { - DMERR("Error initializing LMK hash, err=%ld\n", - PTR_ERR(lmk->hash_tfm)); + ti->error = "Error initializing LMK hash"; return PTR_ERR(lmk->hash_tfm); }
/* No seed in LMK version 2 */ - if (ctx->key_parts == ctx->tfms_count) { + if (cc->key_parts == cc->tfms_count) { lmk->seed = NULL; return 0; }
lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); if (!lmk->seed) { - crypt_iv_lmk_dtr(ctx); - DMERR("Error kmallocing seed storage in LMK\n"); + crypt_iv_lmk_dtr(cc); + ti->error = "Error kmallocing seed storage in LMK"; return -ENOMEM; }
return 0; }
-static int crypt_iv_lmk_init(struct geniv_ctx *ctx) +static int crypt_iv_lmk_init(struct crypt_config *cc) { - struct iv_lmk_private *lmk = &ctx->iv_gen_private.lmk; - int subkey_size = ctx->key_size / ctx->key_parts; + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; + int subkey_size = cc->key_size / cc->key_parts;
/* LMK seed is on the position of LMK_KEYS + 1 key */ if (lmk->seed) - memcpy(lmk->seed, ctx->key + (ctx->tfms_count * subkey_size), + memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), crypto_shash_digestsize(lmk->hash_tfm));
return 0; }
-static int crypt_iv_lmk_wipe(struct geniv_ctx *ctx) +static int crypt_iv_lmk_wipe(struct crypt_config *cc) { - struct iv_lmk_private *lmk = &ctx->iv_gen_private.lmk; + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
if (lmk->seed) memset(lmk->seed, 0, LMK_SEED_SIZE); @@ -678,10 +705,11 @@ static int crypt_iv_lmk_wipe(struct geniv_ctx *ctx) return 0; }
-static int crypt_iv_lmk_one(struct geniv_ctx *ctx, u8 *iv, - struct geniv_subreq *subreq, u8 *data) +static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq, + u8 *data) { - struct iv_lmk_private *lmk = &ctx->iv_gen_private.lmk; + struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; SHASH_DESC_ON_STACK(desc, lmk->hash_tfm); struct md5_state md5state; __le32 buf[4]; @@ -706,8 +734,8 @@ static int crypt_iv_lmk_one(struct geniv_ctx *ctx, u8 *iv, return r;
/* Sector is cropped to 56 bits here */ - buf[0] = cpu_to_le32(subreq->iv_sector & 0xFFFFFFFF); - buf[1] = cpu_to_le32((((u64)subreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); + buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); + buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); buf[2] = cpu_to_le32(4024); buf[3] = 0; r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf)); @@ -721,56 +749,54 @@ static int crypt_iv_lmk_one(struct geniv_ctx *ctx, u8 *iv,
for (i = 0; i < MD5_HASH_WORDS; i++) __cpu_to_le32s(&md5state.hash[i]); - memcpy(iv, &md5state.hash, ctx->iv_size); + memcpy(iv, &md5state.hash, cc->iv_size);
return 0; }
-static int crypt_iv_lmk_gen(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { struct scatterlist *sg; u8 *src; int r = 0;
- if (rctx->is_write) { - sg = crypt_get_sg_data(ctx, subreq->sg_in); + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { + sg = crypt_get_sg_data(cc, dmreq->sg_in); src = kmap_atomic(sg_page(sg)); - r = crypt_iv_lmk_one(ctx, iv, subreq, src + sg->offset); + r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); kunmap_atomic(src); } else - memset(iv, 0, ctx->iv_size); + memset(iv, 0, cc->iv_size);
return r; }
-static int crypt_iv_lmk_post(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { struct scatterlist *sg; u8 *dst; int r;
- if (rctx->is_write) + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) return 0;
- sg = crypt_get_sg_data(ctx, subreq->sg_out); + sg = crypt_get_sg_data(cc, dmreq->sg_out); dst = kmap_atomic(sg_page(sg)); - r = crypt_iv_lmk_one(ctx, iv, subreq, dst + sg->offset); + r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
/* Tweak the first block of plaintext sector */ if (!r) - crypto_xor(dst + sg->offset, iv, ctx->iv_size); + crypto_xor(dst + sg->offset, iv, cc->iv_size);
kunmap_atomic(dst); return r; }
-static void crypt_iv_tcw_dtr(struct geniv_ctx *ctx) +static void crypt_iv_tcw_dtr(struct crypt_config *cc) { - struct iv_tcw_private *tcw = &ctx->iv_gen_private.tcw; + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
kzfree(tcw->iv_seed); tcw->iv_seed = NULL; @@ -782,66 +808,66 @@ static void crypt_iv_tcw_dtr(struct geniv_ctx *ctx) tcw->crc32_tfm = NULL; }
-static int crypt_iv_tcw_ctr(struct geniv_ctx *ctx) +static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) { - struct iv_tcw_private *tcw = &ctx->iv_gen_private.tcw; + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
- if (ctx->sector_size != (1 << SECTOR_SHIFT)) { - DMERR("Unsupported sector size for TCW\n"); + if (cc->sector_size != (1 << SECTOR_SHIFT)) { + ti->error = "Unsupported sector size for TCW"; return -EINVAL; }
- if (ctx->key_size <= (ctx->iv_size + TCW_WHITENING_SIZE)) { - DMERR("Wrong key size (%d) for TCW. Choose a value > %d bytes\n", - ctx->key_size, ctx->iv_size + TCW_WHITENING_SIZE); + if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) { + ti->error = "Wrong key size for TCW"; return -EINVAL; }
tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0); if (IS_ERR(tcw->crc32_tfm)) { - DMERR("Error initializing CRC32 in TCW; err=%ld\n", - PTR_ERR(tcw->crc32_tfm)); + ti->error = "Error initializing CRC32 in TCW"; return PTR_ERR(tcw->crc32_tfm); }
- tcw->iv_seed = kzalloc(ctx->iv_size, GFP_KERNEL); + tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL); tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL); if (!tcw->iv_seed || !tcw->whitening) { - crypt_iv_tcw_dtr(ctx); - DMERR("Error allocating seed storage in TCW\n"); + crypt_iv_tcw_dtr(cc); + ti->error = "Error allocating seed storage in TCW"; return -ENOMEM; }
return 0; }
-static int crypt_iv_tcw_init(struct geniv_ctx *ctx) +static int crypt_iv_tcw_init(struct crypt_config *cc) { - struct iv_tcw_private *tcw = &ctx->iv_gen_private.tcw; - int key_offset = ctx->key_size - ctx->iv_size - TCW_WHITENING_SIZE; + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
- memcpy(tcw->iv_seed, &ctx->key[key_offset], ctx->iv_size); - memcpy(tcw->whitening, &ctx->key[key_offset + ctx->iv_size], + memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size); + memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size], TCW_WHITENING_SIZE);
return 0; }
-static int crypt_iv_tcw_wipe(struct geniv_ctx *ctx) +static int crypt_iv_tcw_wipe(struct crypt_config *cc) { - struct iv_tcw_private *tcw = &ctx->iv_gen_private.tcw; + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
- memset(tcw->iv_seed, 0, ctx->iv_size); + memset(tcw->iv_seed, 0, cc->iv_size); memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
return 0; }
-static int crypt_iv_tcw_whitening(struct geniv_ctx *ctx, - struct geniv_subreq *subreq, u8 *data) +static int crypt_iv_tcw_whitening(struct crypt_config *cc, + struct dm_crypt_request *dmreq, + u8 *data) { - struct iv_tcw_private *tcw = &ctx->iv_gen_private.tcw; - __le64 sector = cpu_to_le64(subreq->iv_sector); + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + __le64 sector = cpu_to_le64(dmreq->iv_sector); u8 buf[TCW_WHITENING_SIZE]; SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm); int i, r; @@ -875,59 +901,56 @@ static int crypt_iv_tcw_whitening(struct geniv_ctx *ctx, return r; }
-static int crypt_iv_tcw_gen(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { struct scatterlist *sg; - struct iv_tcw_private *tcw = &ctx->iv_gen_private.tcw; - __le64 sector = cpu_to_le64(subreq->iv_sector); + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + __le64 sector = cpu_to_le64(dmreq->iv_sector); u8 *src; int r = 0;
/* Remove whitening from ciphertext */ - if (!rctx->is_write) { - sg = crypt_get_sg_data(ctx, subreq->sg_in); + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { + sg = crypt_get_sg_data(cc, dmreq->sg_in); src = kmap_atomic(sg_page(sg)); - r = crypt_iv_tcw_whitening(ctx, subreq, src + sg->offset); + r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset); kunmap_atomic(src); }
/* Calculate IV */ crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8); - if (ctx->iv_size > 8) crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector, - ctx->iv_size - 8); + cc->iv_size - 8);
return r; }
-static int crypt_iv_tcw_post(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { struct scatterlist *sg; u8 *dst; int r;
- if (!rctx->is_write) + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) return 0;
/* Apply whitening on ciphertext */ - sg = crypt_get_sg_data(ctx, subreq->sg_out); + sg = crypt_get_sg_data(cc, dmreq->sg_out); dst = kmap_atomic(sg_page(sg)); - r = crypt_iv_tcw_whitening(ctx, subreq, dst + sg->offset); + r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset); kunmap_atomic(dst);
return r; }
-static int crypt_iv_random_gen(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv) +static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { /* Used only for writes, there must be an additional space to store IV */ - get_random_bytes(iv, ctx->iv_size); + get_random_bytes(iv, cc->iv_size); return 0; }
@@ -1014,6 +1037,15 @@ static u8 *iv_of_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq) crypto_skcipher_alignmask(crypto_skcipher_reqtfm(subreq->rctx->r.req)) + 1); }
+static const struct crypt_geniv_operations crypt_geniv_plain_ops; +static const struct crypt_geniv_operations crypt_geniv_plain64_ops; +static const struct crypt_geniv_operations crypt_geniv_essiv_ops; +static const struct crypt_geniv_operations crypt_geniv_benbi_ops; +static const struct crypt_geniv_operations crypt_geniv_null_ops; +static const struct crypt_geniv_operations crypt_geniv_lmk_ops; +static const struct crypt_geniv_operations crypt_geniv_tcw_ops; +static const struct crypt_geniv_operations crypt_geniv_random_ops; + static int geniv_init_iv(struct geniv_ctx *ctx) { int ret; @@ -1023,17 +1055,17 @@ static int geniv_init_iv(struct geniv_ctx *ctx) if (ctx->ivmode == NULL) ctx->iv_gen_ops = NULL; else if (strcmp(ctx->ivmode, "plain") == 0) - ctx->iv_gen_ops = &crypt_iv_plain_ops; + ctx->iv_gen_ops = &crypt_geniv_plain_ops; else if (strcmp(ctx->ivmode, "plain64") == 0) - ctx->iv_gen_ops = &crypt_iv_plain64_ops; + ctx->iv_gen_ops = &crypt_geniv_plain64_ops; else if (strcmp(ctx->ivmode, "essiv") == 0) - ctx->iv_gen_ops = &crypt_iv_essiv_ops; + ctx->iv_gen_ops = &crypt_geniv_essiv_ops; else if (strcmp(ctx->ivmode, "benbi") == 0) - ctx->iv_gen_ops = &crypt_iv_benbi_ops; + ctx->iv_gen_ops = &crypt_geniv_benbi_ops; else if (strcmp(ctx->ivmode, "null") == 0) - ctx->iv_gen_ops = &crypt_iv_null_ops; + ctx->iv_gen_ops = &crypt_geniv_null_ops; else if (strcmp(ctx->ivmode, "lmk") == 0) { - ctx->iv_gen_ops = &crypt_iv_lmk_ops; + ctx->iv_gen_ops = &crypt_geniv_lmk_ops; /* * Version 2 and 3 is recognised according * to length of provided multi-key string. @@ -1045,11 +1077,11 @@ static int geniv_init_iv(struct geniv_ctx *ctx) ctx->key_extra_size = ctx->key_size / ctx->key_parts; } } else if (strcmp(ctx->ivmode, "tcw") == 0) { - ctx->iv_gen_ops = &crypt_iv_tcw_ops; + ctx->iv_gen_ops = &crypt_geniv_tcw_ops; ctx->key_parts += 2; /* IV + whitening */ ctx->key_extra_size = ctx->iv_size + TCW_WHITENING_SIZE; } else if (strcmp(ctx->ivmode, "random") == 0) { - ctx->iv_gen_ops = &crypt_iv_random_ops; + ctx->iv_gen_ops = &crypt_geniv_random_ops; /* Need storage space in integrity fields. */ ctx->integrity_iv_size = ctx->iv_size; } else { @@ -2382,11 +2414,16 @@ static bool crypt_integrity_aead(struct crypt_config *cc) return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); }
+static bool crypt_integrity_hmac(struct crypt_config *cc) +{ + return crypt_integrity_aead(cc) && cc->key_mac_size; +} + /* Get sg containing data */ -static struct scatterlist *crypt_get_sg_data(struct geniv_ctx *ctx, +static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, struct scatterlist *sg) { - if (unlikely(geniv_integrity_aead(ctx))) + if (unlikely(crypt_integrity_aead(cc))) return &sg[2];
return sg; @@ -2491,6 +2528,221 @@ static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmre return (void *)((char *)dmreq - cc->dmreq_start); }
+static u8 *iv_of_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + if (crypt_integrity_aead(cc)) + return (u8 *)ALIGN((unsigned long)(dmreq + 1), + crypto_aead_alignmask(any_tfm_aead(cc)) + 1); + else + return (u8 *)ALIGN((unsigned long)(dmreq + 1), + crypto_skcipher_alignmask(any_tfm(cc)) + 1); +} + +static u8 *org_iv_of_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + return iv_of_dmreq(cc, dmreq) + cc->iv_size; +} + +static uint64_t *org_sector_of_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size; + return (uint64_t*) ptr; +} + +static unsigned int *org_tag_of_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + + cc->iv_size + sizeof(uint64_t); + return (unsigned int*)ptr; +} + +static void *tag_from_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + struct convert_context *ctx = dmreq->ctx; + struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); + + return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) * + cc->on_disk_tag_size]; +} + +static void *iv_tag_from_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size; +} + +static int crypt_convert_block_aead(struct crypt_config *cc, + struct convert_context *ctx, + struct aead_request *req, + unsigned int tag_offset) +{ + struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); + struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); + struct dm_crypt_request *dmreq; + u8 *iv, *org_iv, *tag_iv, *tag; + uint64_t *sector; + int r = 0; + + BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size); + + /* Reject unexpected unaligned bio. 
*/ + if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) + return -EIO; + + dmreq = dmreq_of_req(cc, req); + dmreq->iv_sector = ctx->cc_sector; + if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) + dmreq->iv_sector >>= cc->sector_shift; + dmreq->ctx = ctx; + + *org_tag_of_dmreq(cc, dmreq) = tag_offset; + + sector = org_sector_of_dmreq(cc, dmreq); + *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset); + + iv = iv_of_dmreq(cc, dmreq); + org_iv = org_iv_of_dmreq(cc, dmreq); + tag = tag_from_dmreq(cc, dmreq); + tag_iv = iv_tag_from_dmreq(cc, dmreq); + + /* AEAD request: + * |----- AAD -------|------ DATA -------|-- AUTH TAG --| + * | (authenticated) | (auth+encryption) | | + * | sector_LE | IV | sector in/out | tag in/out | + */ + sg_init_table(dmreq->sg_in, 4); + sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t)); + sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size); + sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset); + sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size); + + sg_init_table(dmreq->sg_out, 4); + sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t)); + sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size); + sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset); + sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size); + + if (cc->iv_gen_ops) { + /* For READs use IV stored in integrity metadata */ + if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) { + memcpy(org_iv, tag_iv, cc->iv_size); + } else { + r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); + if (r < 0) + return r; + /* Store generated IV in integrity metadata */ + if (cc->integrity_iv_size) + memcpy(tag_iv, org_iv, cc->iv_size); + } + /* Working copy of IV, to be modified in crypto API */ + memcpy(iv, org_iv, cc->iv_size); + } + + aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size); + if (bio_data_dir(ctx->bio_in) == WRITE) { + aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, + cc->sector_size, iv); + r = crypto_aead_encrypt(req); + if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size) + memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0, + cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size)); + } else { + aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, + cc->sector_size + cc->integrity_tag_size, iv); + r = crypto_aead_decrypt(req); + } + + if (r == -EBADMSG) + DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", + (unsigned long long)le64_to_cpu(*sector)); + + if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) + r = cc->iv_gen_ops->post(cc, org_iv, dmreq); + + bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size); + bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size); + + return r; +} + +static int crypt_convert_block_skcipher(struct crypt_config *cc, + struct convert_context *ctx, + struct skcipher_request *req, + unsigned int tag_offset) +{ + struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); + struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); + struct scatterlist *sg_in, *sg_out; + struct dm_crypt_request *dmreq; + u8 *iv, *org_iv, *tag_iv; + uint64_t *sector; + int r = 0; + + /* Reject unexpected unaligned bio. 
*/ + if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) + return -EIO; + + dmreq = dmreq_of_req(cc, req); + dmreq->iv_sector = ctx->cc_sector; + if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) + dmreq->iv_sector >>= cc->sector_shift; + dmreq->ctx = ctx; + + *org_tag_of_dmreq(cc, dmreq) = tag_offset; + + iv = iv_of_dmreq(cc, dmreq); + org_iv = org_iv_of_dmreq(cc, dmreq); + tag_iv = iv_tag_from_dmreq(cc, dmreq); + + sector = org_sector_of_dmreq(cc, dmreq); + *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset); + + /* For skcipher we use only the first sg item */ + sg_in = &dmreq->sg_in[0]; + sg_out = &dmreq->sg_out[0]; + + sg_init_table(sg_in, 1); + sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset); + + sg_init_table(sg_out, 1); + sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset); + + if (cc->iv_gen_ops) { + /* For READs use IV stored in integrity metadata */ + if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) { + memcpy(org_iv, tag_iv, cc->integrity_iv_size); + } else { + r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); + if (r < 0) + return r; + /* Store generated IV in integrity metadata */ + if (cc->integrity_iv_size) + memcpy(tag_iv, org_iv, cc->integrity_iv_size); + } + /* Working copy of IV, to be modified in crypto API */ + memcpy(iv, org_iv, cc->iv_size); + } + + skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv); + + if (bio_data_dir(ctx->bio_in) == WRITE) + r = crypto_skcipher_encrypt(req); + else + r = crypto_skcipher_decrypt(req); + + if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) + r = cc->iv_gen_ops->post(cc, org_iv, dmreq); + + bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size); + bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size); + + return r; +}
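To make the AEAD layout comment in crypt_convert_block_aead() concrete: the little-endian sector number plus the original IV form the associated data, the sector payload is transformed in place, and the authentication tag rides in the fourth sg entry. A userspace sketch of the resulting lengths, using assumed sizes (sector_size = 512, iv_size = 16, integrity_tag_size = 16; illustrative only, not from the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int sector_size = 512, iv_size = 16, tag_size = 16; /* assumed */
		unsigned int assoclen = (unsigned int)sizeof(uint64_t) + iv_size;

		/* mirrors aead_request_set_ad()/aead_request_set_crypt() above */
		printf("AAD: %u bytes, encrypt cryptlen: %u, decrypt cryptlen: %u\n",
		       assoclen, sector_size, sector_size + tag_size);
		return 0;
	}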
static void kcryptd_async_done(struct crypto_async_request *async_req, int error); @@ -2498,10 +2750,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, static void crypt_alloc_req_skcipher(struct crypt_config *cc, struct convert_context *ctx) { + unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); + if (!ctx->r.req) ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
- skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfm); + skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
/* * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs @@ -2518,7 +2772,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc, if (!ctx->r.req_aead) ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
- aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfm_aead); + aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
/* * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs @@ -2567,117 +2821,68 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_ /* * Encrypt / decrypt data from one bio to another one (can be the same one) */ -static blk_status_t crypt_convert_bio(struct crypt_config *cc, - struct convert_context *ctx) +static blk_status_t crypt_convert(struct crypt_config *cc, + struct convert_context *ctx) { - unsigned int cryptlen, n1, n2, nents, i = 0, bytes = 0; - struct skcipher_request *req = NULL; - struct aead_request *req_aead = NULL; - struct dm_crypt_request *dmreq; - struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); - struct geniv_req_info rinfo; - struct bio_vec bv_in, bv_out; + unsigned int tag_offset = 0; + unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT; int r;
atomic_set(&ctx->cc_pending, 1); - crypt_alloc_req(cc, ctx);
- if (crypt_integrity_aead(cc)) { - req_aead = ctx->r.req_aead; - dmreq = dmreq_of_req(cc, req_aead); - } else { - req = ctx->r.req; - dmreq = dmreq_of_req(cc, req); - } - - n1 = bio_segments(ctx->bio_in); - n2 = bio_segments(ctx->bio_out); - nents = max(n1, n2); - nents = min((unsigned int)MAX_SG_LIST, nents); - cryptlen = ctx->iter_in.bi_size; - - DMDEBUG("dm-crypt:%s: segments:[in=%u, out=%u] bi_size=%u\n", - bio_data_dir(ctx->bio_in) == WRITE ? "write" : "read", - n1, n2, cryptlen); - - dmreq->sg_in = kcalloc(nents, sizeof(struct scatterlist), GFP_KERNEL); - dmreq->sg_out = kcalloc(nents, sizeof(struct scatterlist), GFP_KERNEL); - if (!dmreq->sg_in || !dmreq->sg_out) { - DMERR("dm-crypt: Failed to allocate scatterlist\n"); - r = -ENOMEM; - return r; - } - dmreq->ctx = ctx; - - sg_init_table(dmreq->sg_in, nents); - sg_init_table(dmreq->sg_out, nents); - - while (ctx->iter_in.bi_size && ctx->iter_out.bi_size && i < nents) { - bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); - bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); - - sg_set_page(&dmreq->sg_in[i], bv_in.bv_page, bv_in.bv_len, - bv_in.bv_offset); - sg_set_page(&dmreq->sg_out[i], bv_out.bv_page, bv_out.bv_len, - bv_out.bv_offset); - - bio_advance_iter(ctx->bio_in, &ctx->iter_in, bv_in.bv_len); - bio_advance_iter(ctx->bio_out, &ctx->iter_out, bv_out.bv_len); - - bytes += bv_in.bv_len; - i++; - } - - DMDEBUG("dm-crypt: Processed %u of %u bytes\n", bytes, cryptlen); + while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
- rinfo.cc_sector = ctx->cc_sector; - rinfo.nents = nents; - rinfo.integrity_metadata = io->integrity_metadata; + crypt_alloc_req(cc, ctx); + atomic_inc(&ctx->cc_pending);
- atomic_inc(&ctx->cc_pending); - if (crypt_integrity_aead(cc)) { - aead_request_set_crypt(req_aead, dmreq->sg_in, dmreq->sg_out, - bytes, (u8 *)&rinfo); - if (bio_data_dir(ctx->bio_in) == WRITE) - r = crypto_aead_encrypt(req_aead); - else - r = crypto_aead_decrypt(req_aead); - } else { - skcipher_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, - bytes, (u8 *)&rinfo); - if (bio_data_dir(ctx->bio_in) == WRITE) - r = crypto_skcipher_encrypt(req); + if (crypt_integrity_aead(cc)) + r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset); else - r = crypto_skcipher_decrypt(req); - } + r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
- switch (r) { - /* The request was queued so wait. */ - case -EBUSY: - wait_for_completion(&ctx->restart); - reinit_completion(&ctx->restart); - /* fall through */ - /* - * The request is queued and processed asynchronously, - * completion function kcryptd_async_done() is called. - */ - case -EINPROGRESS: - ctx->r.req = NULL; - cond_resched(); - return 0; - /* The requeest was already processed (synchronously). */ - case 0: - atomic_dec(&ctx->cc_pending); - return 0; - /* There was a data integrity error. */ - case -EBADMSG: - atomic_dec(&ctx->cc_pending); - return BLK_STS_PROTECTION; - /* There was an error while processing the request. */ - default: - atomic_dec(&ctx->cc_pending); - return BLK_STS_IOERR; + switch (r) { + /* + * The request was queued by a crypto driver + * but the driver request queue is full, let's wait. + */ + case -EBUSY: + wait_for_completion(&ctx->restart); + reinit_completion(&ctx->restart); + /* fall through */ + /* + * The request is queued and processed asynchronously, + * completion function kcryptd_async_done() will be called. + */ + case -EINPROGRESS: + ctx->r.req = NULL; + ctx->cc_sector += sector_step; + tag_offset++; + continue; + /* + * The request was already processed (synchronously). + */ + case 0: + atomic_dec(&ctx->cc_pending); + ctx->cc_sector += sector_step; + tag_offset++; + cond_resched(); + continue; + /* + * There was a data integrity error. + */ + case -EBADMSG: + atomic_dec(&ctx->cc_pending); + return BLK_STS_PROTECTION; + /* + * There was an error while processing the request. + */ + default: + atomic_dec(&ctx->cc_pending); + return BLK_STS_IOERR; + } } + + return 0; }
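The switch in crypt_convert() above is the fully asynchronous handling of the three crypto-API outcomes: 0 (completed synchronously), -EINPROGRESS (the completion path will call kcryptd_async_done()), and -EBUSY (backlogged, wait on ctx->restart and retry). For comparison, a caller that can simply block may fold the same state machine into the generic crypto_wait_req() helper; a sketch of that pattern (this is not what dm-crypt does, just the blocking variant of the same contract):

	#include <crypto/skcipher.h>
	#include <linux/crypto.h>

	/* Sketch only: synchronously run one already-prepared skcipher request. */
	static int encrypt_one_sync(struct skcipher_request *req)
	{
		DECLARE_CRYPTO_WAIT(wait);

		/* crypto_req_done() completes 'wait'; MAY_BACKLOG absorbs -EBUSY. */
		skcipher_request_set_callback(req,
					      CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);

		/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into the final status. */
		return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	}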
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); @@ -2786,24 +2991,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io) { struct crypt_config *cc = io->cc; struct bio *base_bio = io->base_bio; - struct dm_crypt_request *dmreq; blk_status_t error = io->error;
if (!atomic_dec_and_test(&io->io_pending)) return;
- if (io->ctx.r.req) { + if (io->ctx.r.req) crypt_free_req(cc, io->ctx.r.req, base_bio);
- if (crypt_integrity_aead(cc)) - dmreq = dmreq_of_req(cc, io->ctx.r.req_aead); - else - dmreq = dmreq_of_req(cc, io->ctx.r.req); - DMDEBUG("dm-crypt: Freeing scatterlists [sync]\n"); - kfree(dmreq->sg_in); - kfree(dmreq->sg_out); - } - if (unlikely(io->integrity_metadata_from_pool)) mempool_free(io->integrity_metadata, &io->cc->tag_pool); else @@ -3045,7 +3240,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) sector += bio_sectors(clone);
crypt_inc_pending(io); - r = crypt_convert_bio(cc, &io->ctx); + r = crypt_convert(cc, &io->ctx); if (r) io->error = r; crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); @@ -3075,7 +3270,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, io->sector);
- r = crypt_convert_bio(cc, &io->ctx); + r = crypt_convert(cc, &io->ctx); if (r) io->error = r;
@@ -3103,16 +3298,16 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, return; }
+ if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) + error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq); + if (error == -EBADMSG) { - DMERR("INTEGRITY AEAD ERROR\n"); + DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", + (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq))); io->error = BLK_STS_PROTECTION; } else if (error < 0) io->error = BLK_STS_IOERR;
- DMDEBUG("dm-crypt: Freeing scatterlists and request struct [async]\n"); - kfree(dmreq->sg_in); - kfree(dmreq->sg_out); - crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
if (!atomic_dec_and_test(&ctx->cc_pending)) @@ -3142,41 +3337,61 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io) queue_work(cc->crypt_queue, &io->work); }
-static void crypt_free_tfm(struct crypt_config *cc) +static void crypt_free_tfms_aead(struct crypt_config *cc) { - if (crypt_integrity_aead(cc)) { - if (!cc->cipher_tfm.tfm_aead) - return; - if (cc->cipher_tfm.tfm_aead && !IS_ERR(cc->cipher_tfm.tfm_aead)) { - crypto_free_aead(cc->cipher_tfm.tfm_aead); - cc->cipher_tfm.tfm_aead = NULL; - } - } else { - if (!cc->cipher_tfm.tfm) - return; - if (cc->cipher_tfm.tfm && !IS_ERR(cc->cipher_tfm.tfm)) { - crypto_free_skcipher(cc->cipher_tfm.tfm); - cc->cipher_tfm.tfm = NULL; - } + if (!cc->cipher_tfm.tfms_aead) + return; + + if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) { + crypto_free_aead(cc->cipher_tfm.tfms_aead[0]); + cc->cipher_tfm.tfms_aead[0] = NULL; } + + kfree(cc->cipher_tfm.tfms_aead); + cc->cipher_tfm.tfms_aead = NULL; }
-static int crypt_alloc_tfm(struct crypt_config *cc, char *ciphermode) +static void crypt_free_tfms_skcipher(struct crypt_config *cc) { - int err; + unsigned i;
- if (crypt_integrity_aead(cc)) { - cc->cipher_tfm.tfm_aead = crypto_alloc_aead(ciphermode, 0, 0); - if (IS_ERR(cc->cipher_tfm.tfm_aead)) { - err = PTR_ERR(cc->cipher_tfm.tfm_aead); - crypt_free_tfm(cc); - return err; + if (!cc->cipher_tfm.tfms) + return; + + for (i = 0; i < cc->tfms_count; i++) + if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) { + crypto_free_skcipher(cc->cipher_tfm.tfms[i]); + cc->cipher_tfm.tfms[i] = NULL; } - } else { - cc->cipher_tfm.tfm = crypto_alloc_skcipher(ciphermode, 0, 0); - if (IS_ERR(cc->cipher_tfm.tfm)) { - err = PTR_ERR(cc->cipher_tfm.tfm); - crypt_free_tfm(cc); + + kfree(cc->cipher_tfm.tfms); + cc->cipher_tfm.tfms = NULL; +} + +static void crypt_free_tfms(struct crypt_config *cc) +{ + if (crypt_integrity_aead(cc)) + crypt_free_tfms_aead(cc); + else + crypt_free_tfms_skcipher(cc); +} + +static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode) +{ + unsigned i; + int err; + + cc->cipher_tfm.tfms = kcalloc(cc->tfms_count, + sizeof(struct crypto_skcipher *), + GFP_KERNEL); + if (!cc->cipher_tfm.tfms) + return -ENOMEM; + + for (i = 0; i < cc->tfms_count; i++) { + cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); + if (IS_ERR(cc->cipher_tfm.tfms[i])) { + err = PTR_ERR(cc->cipher_tfm.tfms[i]); + crypt_free_tfms(cc); return err; } } @@ -3184,36 +3399,82 @@ static int crypt_alloc_tfm(struct crypt_config *cc, char *ciphermode) return 0; }
-static void init_key_info(struct crypt_config *cc, enum setkey_op keyop, - char *ivopts, struct geniv_key_info *kinfo) +static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode) { - kinfo->keyop = keyop; - kinfo->tfms_count = cc->tfms_count; - kinfo->key = cc->key; - kinfo->cipher_flags = cc->cipher_flags; - kinfo->ivopts = ivopts; - kinfo->iv_offset = cc->iv_offset; - kinfo->sector_size = cc->sector_size; - kinfo->key_size = cc->key_size; - kinfo->key_parts = cc->key_parts; - kinfo->key_mac_size = cc->key_mac_size; - kinfo->on_disk_tag_size = cc->on_disk_tag_size; -} + int err;
-static int crypt_setkey(struct crypt_config *cc, enum setkey_op keyop, - char *ivopts) -{ - int r = 0; - struct geniv_key_info kinfo; + cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL); + if (!cc->cipher_tfm.tfms) + return -ENOMEM; + + cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0); + if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) { + err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]); + crypt_free_tfms(cc); + return err; + }
- init_key_info(cc, keyop, ivopts, &kinfo); + return 0; +}
+static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) +{ if (crypt_integrity_aead(cc)) - r = crypto_aead_setkey(cc->cipher_tfm.tfm_aead, (u8 *)&kinfo, sizeof(kinfo)); + return crypt_alloc_tfms_aead(cc, ciphermode); else - r = crypto_skcipher_setkey(cc->cipher_tfm.tfm, (u8 *)&kinfo, sizeof(kinfo)); + return crypt_alloc_tfms_skcipher(cc, ciphermode); +}
- return r; +static unsigned crypt_subkey_size(struct crypt_config *cc) +{ + return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); +} + +static unsigned crypt_authenckey_size(struct crypt_config *cc) +{ + return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param)); +} + +static void crypt_copy_authenckey(char *p, const void *key, + unsigned enckeylen, unsigned authkeylen); + +static int crypt_setkey(struct crypt_config *cc) +{ + unsigned subkey_size; + int err = 0, i, r; + + /* Ignore extra keys (which are used for IV etc) */ + subkey_size = crypt_subkey_size(cc); + + if (crypt_integrity_hmac(cc)) { + if (subkey_size < cc->key_mac_size) + return -EINVAL; + + crypt_copy_authenckey(cc->authenc_key, cc->key, + subkey_size - cc->key_mac_size, + cc->key_mac_size); + } + + for (i = 0; i < cc->tfms_count; i++) { + if (crypt_integrity_hmac(cc)) + r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], + cc->authenc_key, crypt_authenckey_size(cc)); + else if (crypt_integrity_aead(cc)) + r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], + cc->key + (i * subkey_size), + subkey_size); + else + r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i], + cc->key + (i * subkey_size), + subkey_size); + if (r) + err = r; + } + + if (crypt_integrity_hmac(cc)) + memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc)); + + return err; }
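crypt_setkey() above slices the user-supplied key evenly across the tfms_count transforms after setting aside key_extra_size bytes of IV material (LMK seed, TCW IV/whitening); crypt_subkey_size() can use a shift by ilog2(tfms_count) because the key count must be a power of two, so it matches plain division. A tiny worked sketch with made-up numbers (not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical multi-key setup. */
		unsigned int key_size = 72, key_extra_size = 8, tfms_count = 2;
		unsigned int subkey_size = (key_size - key_extra_size) / tfms_count;
		unsigned int i;

		for (i = 0; i < tfms_count; i++)
			printf("tfm[%u] is keyed with bytes [%u, %u) of the key\n",
			       i, i * subkey_size, (i + 1) * subkey_size);
		return 0;
	}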
#ifdef CONFIG_KEYS @@ -3226,9 +3487,7 @@ static bool contains_whitespace(const char *str) return false; }
-static int crypt_set_keyring_key(struct crypt_config *cc, - const char *key_string, - enum setkey_op keyop, char *ivopts) +static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string) { char *new_key_string, *key_desc; int ret; @@ -3289,7 +3548,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, /* clear the flag since following operations may invalidate previously valid key */ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- ret = crypt_setkey(cc, keyop, ivopts); + ret = crypt_setkey(cc);
if (!ret) { set_bit(DM_CRYPT_KEY_VALID, &cc->flags); @@ -3326,9 +3585,7 @@ static int get_key_size(char **key_string)
#else
-static int crypt_set_keyring_key(struct crypt_config *cc, - const char *key_string, - enum setkey_op keyop, char *ivopts) +static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string) { return -EINVAL; } @@ -3340,8 +3597,7 @@ static int get_key_size(char **key_string)
#endif
-static int crypt_set_key(struct crypt_config *cc, enum setkey_op keyop, - char *key, char *ivopts) +static int crypt_set_key(struct crypt_config *cc, char *key) { int r = -EINVAL; int key_string_len = strlen(key); @@ -3352,7 +3608,7 @@ static int crypt_set_key(struct crypt_config *cc, enum setkey_op keyop,
/* ':' means the key is in kernel keyring, short-circuit normal key processing */ if (key[0] == ':') { - r = crypt_set_keyring_key(cc, key + 1, keyop, ivopts); + r = crypt_set_keyring_key(cc, key + 1); goto out; }
@@ -3367,7 +3623,7 @@ static int crypt_set_key(struct crypt_config *cc, enum setkey_op keyop, if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0) goto out;
- r = crypt_setkey(cc, keyop, ivopts); + r = crypt_setkey(cc); if (!r) set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
@@ -3378,17 +3634,6 @@ static int crypt_set_key(struct crypt_config *cc, enum setkey_op keyop, return r; }
-static int crypt_init_key(struct dm_target *ti, char *key, char *ivopts) -{ - struct crypt_config *cc = ti->private; - int ret; - - ret = crypt_set_key(cc, SETKEY_OP_INIT, key, ivopts); - if (ret < 0) - ti->error = "Error decoding and setting key"; - return ret; -} - static int crypt_wipe_key(struct crypt_config *cc) { int r; @@ -3397,7 +3642,7 @@ static int crypt_wipe_key(struct crypt_config *cc) get_random_bytes(&cc->key, cc->key_size); kzfree(cc->key_string); cc->key_string = NULL; - r = crypt_setkey(cc, SETKEY_OP_WIPE, NULL); + r = crypt_setkey(cc); memset(&cc->key, 0, cc->key_size * sizeof(u8));
return r; @@ -3457,7 +3702,7 @@ static void crypt_dtr(struct dm_target *ti) if (cc->crypt_queue) destroy_workqueue(cc->crypt_queue);
- crypt_free_tfm(cc); + crypt_free_tfms(cc);
bioset_exit(&cc->bs);
@@ -3468,12 +3713,17 @@ static void crypt_dtr(struct dm_target *ti) WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0); percpu_counter_destroy(&cc->n_allocated_pages);
+ if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) + cc->iv_gen_ops->dtr(cc); + if (cc->dev) dm_put_device(ti, cc->dev);
+ kzfree(cc->cipher); kzfree(cc->cipher_string); kzfree(cc->key_string); kzfree(cc->cipher_auth); + kzfree(cc->authenc_key);
mutex_destroy(&cc->bio_alloc_lock);
@@ -3487,32 +3737,6 @@ static void crypt_dtr(struct dm_target *ti) spin_unlock(&dm_crypt_clients_lock); }
-static int get_iv_size_by_name(struct crypt_config *cc, char *alg_name) -{ - unsigned int iv_size; - struct crypto_aead *tfm_aead; - struct crypto_skcipher *tfm; - - if (crypt_integrity_aead(cc)) { - tfm_aead = crypto_alloc_aead(alg_name, 0, 0); - if (IS_ERR(tfm_aead)) - return -ENOMEM; - - iv_size = crypto_aead_ivsize(tfm_aead); - crypto_free_aead(tfm_aead); - } else { - tfm = crypto_alloc_skcipher(alg_name, 0, 0); - if (IS_ERR(tfm)) - return -ENOMEM; - - iv_size = crypto_skcipher_ivsize(tfm); - crypto_free_skcipher(tfm); - } - - return iv_size; - -} - static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode) { struct crypt_config *cc = ti->private; @@ -3526,12 +3750,97 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode) /* at least a 64 bit sector number should fit in our buffer */ cc->iv_size = max(cc->iv_size, (unsigned int)(sizeof(u64) / sizeof(u8))); + else if (ivmode) { + DMWARN("Selected cipher does not support IVs"); + ivmode = NULL; + }
- if (strcmp(ivmode, "random") == 0) { + /* Choose ivmode, see comments at iv code. */ + if (ivmode == NULL) + cc->iv_gen_ops = NULL; + else if (strcmp(ivmode, "plain") == 0) + cc->iv_gen_ops = &crypt_iv_plain_ops; + else if (strcmp(ivmode, "plain64") == 0) + cc->iv_gen_ops = &crypt_iv_plain64_ops; + else if (strcmp(ivmode, "plain64be") == 0) + cc->iv_gen_ops = &crypt_iv_plain64be_ops; + else if (strcmp(ivmode, "essiv") == 0) + cc->iv_gen_ops = &crypt_iv_essiv_ops; + else if (strcmp(ivmode, "benbi") == 0) + cc->iv_gen_ops = &crypt_iv_benbi_ops; + else if (strcmp(ivmode, "null") == 0) + cc->iv_gen_ops = &crypt_iv_null_ops; + else if (strcmp(ivmode, "lmk") == 0) { + cc->iv_gen_ops = &crypt_iv_lmk_ops; + /* + * Version 2 and 3 is recognised according + * to length of provided multi-key string. + * If present (version 3), last key is used as IV seed. + * All keys (including IV seed) are always the same size. + */ + if (cc->key_size % cc->key_parts) { + cc->key_parts++; + cc->key_extra_size = cc->key_size / cc->key_parts; + } + } else if (strcmp(ivmode, "tcw") == 0) { + cc->iv_gen_ops = &crypt_iv_tcw_ops; + cc->key_parts += 2; /* IV + whitening */ + cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; + } else if (strcmp(ivmode, "random") == 0) { + cc->iv_gen_ops = &crypt_iv_random_ops; /* Need storage space in integrity fields. */ cc->integrity_iv_size = cc->iv_size; + } else { + ti->error = "Invalid IV mode"; + return -EINVAL; + } + + return 0; +} + +/* + * Workaround to parse cipher algorithm from crypto API spec. + * The cc->cipher is currently used only in ESSIV. + * This should be probably done by crypto-api calls (once available...) + */ +static int crypt_ctr_blkdev_cipher(struct crypt_config *cc) +{ + const char *alg_name = NULL; + char *start, *end; + + if (crypt_integrity_aead(cc)) { + alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc))); + if (!alg_name) + return -EINVAL; + if (crypt_integrity_hmac(cc)) { + alg_name = strchr(alg_name, ','); + if (!alg_name) + return -EINVAL; + } + alg_name++; + } else { + alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc))); + if (!alg_name) + return -EINVAL; + } + + start = strchr(alg_name, '('); + end = strchr(alg_name, ')'); + + if (!start && !end) { + cc->cipher = kstrdup(alg_name, GFP_KERNEL); + return cc->cipher ? 0 : -ENOMEM; }
+ if (!start || !end || ++start >= end) + return -EINVAL; + + cc->cipher = kzalloc(end - start + 1, GFP_KERNEL); + if (!cc->cipher) + return -ENOMEM; + + strncpy(cc->cipher, start, end - start); + return 0; }
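crypt_ctr_blkdev_cipher() above only needs the bare cipher name (for ESSIV), so it takes whatever sits between the parentheses of the crypto API spec, after first skipping past the HMAC part in the authenc() case. A userspace sketch of the same extraction on example strings (illustrative only, not from the patch):

	#include <stdio.h>
	#include <string.h>

	static void extract_cipher(const char *alg_name)
	{
		const char *start = strchr(alg_name, '(');
		const char *end = strchr(alg_name, ')');

		if (!start && !end) {			/* bare cipher, e.g. "aes" */
			printf("%-10s -> %s\n", alg_name, alg_name);
			return;
		}
		if (!start || !end || ++start >= end) {
			printf("%-10s -> invalid\n", alg_name);
			return;
		}
		printf("%-10s -> %.*s\n", alg_name, (int)(end - start), start);
	}

	int main(void)
	{
		extract_cipher("cbc(aes)");	/* -> aes */
		extract_cipher("aes");		/* -> aes */
		return 0;
	}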
@@ -3567,6 +3876,10 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api) cc->key_mac_size = crypto_ahash_digestsize(mac); crypto_free_ahash(mac);
+ cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL); + if (!cc->authenc_key) + return -ENOMEM; + return 0; }
@@ -3575,7 +3888,6 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key { struct crypt_config *cc = ti->private; char *tmp, *cipher_api; - char cipher_name[CRYPTO_MAX_ALG_NAME]; int ret = -EINVAL;
cc->tfms_count = 1; @@ -3606,29 +3918,8 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
cc->key_parts = cc->tfms_count;
- if (!*ivmode) - *ivmode = "null"; - - /* - * For those ciphers which do not support IVs, but input ivmode is not - * NULL, use "null" as ivmode compulsively. - */ - cc->iv_size = get_iv_size_by_name(cc, cipher_api); - if (cc->iv_size < 0) - return -ENOMEM; - if (!cc->iv_size && ivmode) { - DMWARN("Selected cipher does not support IVs"); - *ivmode = "null"; - } - /* Allocate cipher */ - ret = snprintf(cipher_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", - *ivmode, cipher_api); - if (ret < 0) { - ti->error = "Cannot allocate cipher strings"; - return -ENOMEM; - } - ret = crypt_alloc_tfm(cc, cipher_name); + ret = crypt_alloc_tfms(cc, cipher_api); if (ret < 0) { ti->error = "Error allocating crypto tfm"; return ret; @@ -3645,6 +3936,12 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key } else cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
+ ret = crypt_ctr_blkdev_cipher(cc); + if (ret < 0) { + ti->error = "Cannot allocate cipher string"; + return -ENOMEM; + } + return 0; }
@@ -3679,6 +3976,10 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key } cc->key_parts = cc->tfms_count;
+ cc->cipher = kstrdup(cipher, GFP_KERNEL); + if (!cc->cipher) + goto bad_mem; + chainmode = strsep(&tmp, "-"); *ivmode = strsep(&tmp, ":"); *ivopts = tmp; @@ -3701,35 +4002,15 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key if (!cipher_api) goto bad_mem;
- /* For those ciphers which do not support IVs, - * use the 'null' template cipher - */ - if (!*ivmode) - *ivmode = "null"; - - /* - * For those ciphers which do not support IVs, but input ivmode is not - * NULL, use "null" as ivmode compulsively. - */ ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, cipher); - cc->iv_size = get_iv_size_by_name(cc, cipher_api); - if (cc->iv_size < 0) - return -ENOMEM; - if (!cc->iv_size && ivmode) { - DMWARN("Selected cipher does not support IVs"); - *ivmode = "null"; - } - - ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, - "%s(%s(%s))", *ivmode, chainmode, cipher); if (ret < 0) { kfree(cipher_api); goto bad_mem; }
/* Allocate cipher */ - ret = crypt_alloc_tfm(cc, cipher_api); + ret = crypt_alloc_tfms(cc, cipher_api); if (ret < 0) { ti->error = "Error allocating crypto tfm"; kfree(cipher_api); @@ -3768,12 +4049,30 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key) return ret;
/* Initialize and set key */ - ret = crypt_init_key(ti, key, ivopts); + ret = crypt_set_key(cc, key); if (ret < 0) { ti->error = "Error decoding and setting key"; return ret; }
+ /* Allocate IV */ + if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { + ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); + if (ret < 0) { + ti->error = "Error creating IV"; + return ret; + } + } + + /* Initialize IV (set keys for ESSIV etc) */ + if (cc->iv_gen_ops && cc->iv_gen_ops->init) { + ret = cc->iv_gen_ops->init(cc); + if (ret < 0) { + ti->error = "Error initialising IV"; + return ret; + } + } + /* wipe the kernel key payload copy */ if (cc->key_string) memset(cc->key, 0, cc->key_size * sizeof(u8)); @@ -3867,7 +4166,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) unsigned int align_mask; unsigned long long tmpll; int ret; - size_t additional_req_size; + size_t iv_size_padding, additional_req_size; char dummy;
if (argc < 5) { @@ -3923,7 +4222,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
- additional_req_size = sizeof(struct dm_crypt_request); + if (align_mask < CRYPTO_MINALIGN) { + /* Allocate the padding exactly */ + iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) + & align_mask; + } else { + /* + * If the cipher requires greater alignment than kmalloc + * alignment, we don't know the exact position of the + * initialization vector. We must assume worst case. + */ + iv_size_padding = align_mask; + } + + /* ...| IV + padding | original IV | original sec. number | bio tag offset | */ + additional_req_size = sizeof(struct dm_crypt_request) + + iv_size_padding + cc->iv_size + + cc->iv_size + + sizeof(uint64_t) + + sizeof(unsigned int);
ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size); if (ret) { @@ -4199,13 +4516,22 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv, return -EINVAL; }
- ret = crypt_set_key(cc, SETKEY_OP_SET, argv[2], NULL); + ret = crypt_set_key(cc, argv[2]); + if (ret) + return ret; + if (cc->iv_gen_ops && cc->iv_gen_ops->init) + ret = cc->iv_gen_ops->init(cc); /* wipe the kernel key payload copy */ if (cc->key_string) memset(cc->key, 0, cc->key_size * sizeof(u8)); return ret; } if (argc == 2 && !strcasecmp(argv[1], "wipe")) { + if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { + ret = cc->iv_gen_ops->wipe(cc); + if (ret) + return ret; + } return crypt_wipe_key(cc); } } @@ -4244,7 +4570,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = { .name = "crypt", - .version = {1, 19, 1}, + .version = {1, 18, 1}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, @@ -4262,12 +4588,6 @@ static int __init dm_crypt_init(void) { int r;
- r = crypto_register_templates(geniv_tmpl, ARRAY_SIZE(geniv_tmpl)); - if (r) { - DMERR("register template failed %d", r); - return r; - } - r = dm_register_target(&crypt_target); if (r < 0) DMERR("register failed %d", r); @@ -4278,7 +4598,6 @@ static int __init dm_crypt_init(void) static void __exit dm_crypt_exit(void) { dm_unregister_target(&crypt_target); - crypto_unregister_templates(geniv_tmpl, ARRAY_SIZE(geniv_tmpl)); }
module_init(dm_crypt_init);
From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 31797
CVE: NA
--------------------------------
We came across a KASAN double-free issue that seems to be related to this patch. Let's revert this patch for now.
This reverts commit 3449c349585d560f37db2fb938347eb37e78bcae.
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: ZhangXiaoxu <zhangxiaoxu5@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/md/dm-crypt.c  | 1929 ++++++------------------------------------------
 include/crypto/geniv.h |   42 --
 2 files changed, 219 insertions(+), 1752 deletions(-)
 delete mode 100644 include/crypto/geniv.h
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 82060f1..e225246 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -33,60 +33,13 @@ #include <crypto/skcipher.h> #include <crypto/aead.h> #include <crypto/authenc.h> -#include <crypto/geniv.h> -#include <crypto/internal/aead.h> -#include <crypto/internal/skcipher.h> #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */ #include <keys/user-type.h> -#include <linux/backing-dev.h> + #include <linux/device-mapper.h> -#include <linux/log2.h>
#define DM_MSG_PREFIX "crypt"
-struct geniv_ctx; -struct geniv_req_ctx; - -/* Sub request for each of the skcipher_request's for a segment */ -struct geniv_subreq { - struct scatterlist sg_in[4]; - struct scatterlist sg_out[4]; - sector_t iv_sector; - struct geniv_req_ctx *rctx; - union { - struct skcipher_request req; - struct aead_request req_aead; - } r CRYPTO_MINALIGN_ATTR; -}; - -/* used to iter the src scatterlist of the input parent request */ -struct scatterlist_iter { - /* current segment to be processed */ - unsigned int seg_no; - /* bytes had been processed in current segment */ - unsigned int done; - /* bytes to be processed in the next request */ - unsigned int len; -}; - -/* contex of the input parent request */ -struct geniv_req_ctx { - struct geniv_subreq *subreq; - bool is_write; - bool is_aead_request; - sector_t cc_sector; - /* array size of src scatterlist of parent request */ - unsigned int nents; - struct scatterlist_iter iter; - struct completion restart; - atomic_t req_pending; - u8 *integrity_metadata; - /* point to the input parent request */ - union { - struct skcipher_request *req; - struct aead_request *req_aead; - } r; -}; /* * context holding the current state of a multi-part conversion */ @@ -145,19 +98,6 @@ struct crypt_iv_operations { struct dm_crypt_request *dmreq); };
-struct crypt_geniv_operations { - int (*ctr)(struct geniv_ctx *ctx); - void (*dtr)(struct geniv_ctx *ctx); - int (*init)(struct geniv_ctx *ctx); - int (*wipe)(struct geniv_ctx *ctx); - int (*generator)(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv); - int (*post)(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, u8 *iv); -}; - struct iv_essiv_private { struct crypto_shash *hash_tfm; u8 *salt; @@ -280,7 +220,6 @@ struct crypt_config { #define MIN_IOS 64 #define MAX_TAG_SIZE 480 #define POOL_ENTRY_SIZE 512 -#define SECTOR_MASK ((1 << SECTOR_SHIFT) - 1)
static DEFINE_SPINLOCK(dm_crypt_clients_lock); static unsigned dm_crypt_clients_n = 0; @@ -306,55 +245,6 @@ static struct crypto_aead *any_tfm_aead(struct crypt_config *cc) return cc->cipher_tfm.tfms_aead[0]; }
-/* context of geniv tfm */ -struct geniv_ctx { - unsigned int tfms_count; - union { - struct crypto_skcipher *tfm; - struct crypto_aead *tfm_aead; - } tfm_child; - union { - struct crypto_skcipher **tfms; - struct crypto_aead **tfms_aead; - } tfms; - - char *ivmode; - unsigned int iv_size; - unsigned int iv_start; - unsigned int rctx_start; - sector_t iv_offset; - unsigned short int sector_size; - unsigned char sector_shift; - char *algname; - char *ivopts; - char *cipher; - char *ciphermode; - unsigned long cipher_flags; - - const struct crypt_geniv_operations *iv_gen_ops; - union { - struct iv_essiv_private essiv; - struct iv_benbi_private benbi; - struct iv_lmk_private lmk; - struct iv_tcw_private tcw; - } iv_gen_private; - void *iv_private; - - mempool_t *subreq_pool; - unsigned int key_size; - unsigned int key_parts; /* independent parts in key buffer */ - unsigned int key_extra_size; /* additional keys length */ - unsigned int key_mac_size; - - unsigned int integrity_tag_size; - unsigned int integrity_iv_size; - unsigned int on_disk_tag_size; - - char *msg; - u8 *authenc_key; /* space for keys in authenc() format (if used) */ - u8 *key; -}; - /* * Different IV generation algorithms: * @@ -748,1662 +638,262 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, return r;
for (i = 0; i < MD5_HASH_WORDS; i++) - __cpu_to_le32s(&md5state.hash[i]); - memcpy(iv, &md5state.hash, cc->iv_size); - - return 0; -} - -static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, - struct dm_crypt_request *dmreq) -{ - struct scatterlist *sg; - u8 *src; - int r = 0; - - if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { - sg = crypt_get_sg_data(cc, dmreq->sg_in); - src = kmap_atomic(sg_page(sg)); - r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); - kunmap_atomic(src); - } else - memset(iv, 0, cc->iv_size); - - return r; -} - -static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, - struct dm_crypt_request *dmreq) -{ - struct scatterlist *sg; - u8 *dst; - int r; - - if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) - return 0; - - sg = crypt_get_sg_data(cc, dmreq->sg_out); - dst = kmap_atomic(sg_page(sg)); - r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); - - /* Tweak the first block of plaintext sector */ - if (!r) - crypto_xor(dst + sg->offset, iv, cc->iv_size); - - kunmap_atomic(dst); - return r; -} - -static void crypt_iv_tcw_dtr(struct crypt_config *cc) -{ - struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; - - kzfree(tcw->iv_seed); - tcw->iv_seed = NULL; - kzfree(tcw->whitening); - tcw->whitening = NULL; - - if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm)) - crypto_free_shash(tcw->crc32_tfm); - tcw->crc32_tfm = NULL; -} - -static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, - const char *opts) -{ - struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; - - if (cc->sector_size != (1 << SECTOR_SHIFT)) { - ti->error = "Unsupported sector size for TCW"; - return -EINVAL; - } - - if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) { - ti->error = "Wrong key size for TCW"; - return -EINVAL; - } - - tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0); - if (IS_ERR(tcw->crc32_tfm)) { - ti->error = "Error initializing CRC32 in TCW"; - return PTR_ERR(tcw->crc32_tfm); - } - - tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL); - tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL); - if (!tcw->iv_seed || !tcw->whitening) { - crypt_iv_tcw_dtr(cc); - ti->error = "Error allocating seed storage in TCW"; - return -ENOMEM; - } - - return 0; -} - -static int crypt_iv_tcw_init(struct crypt_config *cc) -{ - struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; - int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE; - - memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size); - memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size], - TCW_WHITENING_SIZE); - - return 0; -} - -static int crypt_iv_tcw_wipe(struct crypt_config *cc) -{ - struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; - - memset(tcw->iv_seed, 0, cc->iv_size); - memset(tcw->whitening, 0, TCW_WHITENING_SIZE); - - return 0; -} - -static int crypt_iv_tcw_whitening(struct crypt_config *cc, - struct dm_crypt_request *dmreq, - u8 *data) -{ - struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; - __le64 sector = cpu_to_le64(dmreq->iv_sector); - u8 buf[TCW_WHITENING_SIZE]; - SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm); - int i, r; - - /* xor whitening with sector number */ - crypto_xor_cpy(buf, tcw->whitening, (u8 *)§or, 8); - crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)§or, 8); - - /* calculate crc32 for every 32bit part and xor it */ - desc->tfm = tcw->crc32_tfm; - desc->flags = 0; - for (i = 0; i < 4; i++) { - r = crypto_shash_init(desc); - if (r) - goto out; - r = crypto_shash_update(desc, &buf[i * 4], 4); - if (r) - goto out; - r = crypto_shash_final(desc, 
&buf[i * 4]); - if (r) - goto out; - } - crypto_xor(&buf[0], &buf[12], 4); - crypto_xor(&buf[4], &buf[8], 4); - - /* apply whitening (8 bytes) to whole sector */ - for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++) - crypto_xor(data + i * 8, buf, 8); -out: - memzero_explicit(buf, sizeof(buf)); - return r; -} - -static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, - struct dm_crypt_request *dmreq) -{ - struct scatterlist *sg; - struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; - __le64 sector = cpu_to_le64(dmreq->iv_sector); - u8 *src; - int r = 0; - - /* Remove whitening from ciphertext */ - if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { - sg = crypt_get_sg_data(cc, dmreq->sg_in); - src = kmap_atomic(sg_page(sg)); - r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset); - kunmap_atomic(src); - } - - /* Calculate IV */ - crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)§or, 8); - if (cc->iv_size > 8) - crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)§or, - cc->iv_size - 8); - - return r; -} - -static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, - struct dm_crypt_request *dmreq) -{ - struct scatterlist *sg; - u8 *dst; - int r; - - if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) - return 0; - - /* Apply whitening on ciphertext */ - sg = crypt_get_sg_data(cc, dmreq->sg_out); - dst = kmap_atomic(sg_page(sg)); - r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset); - kunmap_atomic(dst); - - return r; -} - -static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv, - struct dm_crypt_request *dmreq) -{ - /* Used only for writes, there must be an additional space to store IV */ - get_random_bytes(iv, cc->iv_size); - return 0; -} - -static const struct crypt_iv_operations crypt_iv_plain_ops = { - .generator = crypt_iv_plain_gen -}; - -static const struct crypt_iv_operations crypt_iv_plain64_ops = { - .generator = crypt_iv_plain64_gen -}; - -static const struct crypt_iv_operations crypt_iv_plain64be_ops = { - .generator = crypt_iv_plain64be_gen -}; - -static const struct crypt_iv_operations crypt_iv_essiv_ops = { - .ctr = crypt_iv_essiv_ctr, - .dtr = crypt_iv_essiv_dtr, - .init = crypt_iv_essiv_init, - .wipe = crypt_iv_essiv_wipe, - .generator = crypt_iv_essiv_gen -}; - -static const struct crypt_iv_operations crypt_iv_benbi_ops = { - .ctr = crypt_iv_benbi_ctr, - .dtr = crypt_iv_benbi_dtr, - .generator = crypt_iv_benbi_gen -}; - -static const struct crypt_iv_operations crypt_iv_null_ops = { - .generator = crypt_iv_null_gen -}; - -static const struct crypt_iv_operations crypt_iv_lmk_ops = { - .ctr = crypt_iv_lmk_ctr, - .dtr = crypt_iv_lmk_dtr, - .init = crypt_iv_lmk_init, - .wipe = crypt_iv_lmk_wipe, - .generator = crypt_iv_lmk_gen, - .post = crypt_iv_lmk_post -}; - -static const struct crypt_iv_operations crypt_iv_tcw_ops = { - .ctr = crypt_iv_tcw_ctr, - .dtr = crypt_iv_tcw_dtr, - .init = crypt_iv_tcw_init, - .wipe = crypt_iv_tcw_wipe, - .generator = crypt_iv_tcw_gen, - .post = crypt_iv_tcw_post -}; - -static struct crypt_iv_operations crypt_iv_random_ops = { - .generator = crypt_iv_random_gen -}; - - -static bool geniv_integrity_aead(struct geniv_ctx *ctx) -{ - return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &ctx->cipher_flags); -} - -static bool geniv_integrity_hmac(struct geniv_ctx *ctx) -{ - return geniv_integrity_aead(ctx) && ctx->key_mac_size; -} - -static struct geniv_req_ctx *geniv_skcipher_req_ctx(struct skcipher_request *req) -{ - return (void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), __alignof__(struct geniv_req_ctx)); -} - -static struct geniv_req_ctx 
*geniv_aead_req_ctx(struct aead_request *req) -{ - return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), __alignof__(struct geniv_req_ctx)); -} - -static u8 *iv_of_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq) -{ - if (geniv_integrity_aead(ctx)) - return (u8 *)ALIGN((unsigned long)((char *)subreq + ctx->iv_start), - crypto_aead_alignmask(crypto_aead_reqtfm(subreq->rctx->r.req_aead)) + 1); - else - return (u8 *)ALIGN((unsigned long)((char *)subreq + ctx->iv_start), - crypto_skcipher_alignmask(crypto_skcipher_reqtfm(subreq->rctx->r.req)) + 1); -} - -static const struct crypt_geniv_operations crypt_geniv_plain_ops; -static const struct crypt_geniv_operations crypt_geniv_plain64_ops; -static const struct crypt_geniv_operations crypt_geniv_essiv_ops; -static const struct crypt_geniv_operations crypt_geniv_benbi_ops; -static const struct crypt_geniv_operations crypt_geniv_null_ops; -static const struct crypt_geniv_operations crypt_geniv_lmk_ops; -static const struct crypt_geniv_operations crypt_geniv_tcw_ops; -static const struct crypt_geniv_operations crypt_geniv_random_ops; - -static int geniv_init_iv(struct geniv_ctx *ctx) -{ - int ret; - - DMDEBUG("IV Generation algorithm : %s\n", ctx->ivmode); - - if (ctx->ivmode == NULL) - ctx->iv_gen_ops = NULL; - else if (strcmp(ctx->ivmode, "plain") == 0) - ctx->iv_gen_ops = &crypt_geniv_plain_ops; - else if (strcmp(ctx->ivmode, "plain64") == 0) - ctx->iv_gen_ops = &crypt_geniv_plain64_ops; - else if (strcmp(ctx->ivmode, "essiv") == 0) - ctx->iv_gen_ops = &crypt_geniv_essiv_ops; - else if (strcmp(ctx->ivmode, "benbi") == 0) - ctx->iv_gen_ops = &crypt_geniv_benbi_ops; - else if (strcmp(ctx->ivmode, "null") == 0) - ctx->iv_gen_ops = &crypt_geniv_null_ops; - else if (strcmp(ctx->ivmode, "lmk") == 0) { - ctx->iv_gen_ops = &crypt_geniv_lmk_ops; - /* - * Version 2 and 3 is recognised according - * to length of provided multi-key string. - * If present (version 3), last key is used as IV seed. - * All keys (including IV seed) are always the same size. - */ - if (ctx->key_size % ctx->key_parts) { - ctx->key_parts++; - ctx->key_extra_size = ctx->key_size / ctx->key_parts; - } - } else if (strcmp(ctx->ivmode, "tcw") == 0) { - ctx->iv_gen_ops = &crypt_geniv_tcw_ops; - ctx->key_parts += 2; /* IV + whitening */ - ctx->key_extra_size = ctx->iv_size + TCW_WHITENING_SIZE; - } else if (strcmp(ctx->ivmode, "random") == 0) { - ctx->iv_gen_ops = &crypt_geniv_random_ops; - /* Need storage space in integrity fields. 
*/ - ctx->integrity_iv_size = ctx->iv_size; - } else { - DMERR("Invalid IV mode %s\n", ctx->ivmode); - return -EINVAL; - } - - /* Allocate IV */ - if (ctx->iv_gen_ops && ctx->iv_gen_ops->ctr) { - ret = ctx->iv_gen_ops->ctr(ctx); - if (ret < 0) { - DMERR("Error creating IV for %s\n", ctx->ivmode); - return ret; - } - } - - /* Initialize IV (set keys for ESSIV etc) */ - if (ctx->iv_gen_ops && ctx->iv_gen_ops->init) { - ret = ctx->iv_gen_ops->init(ctx); - if (ret < 0) { - DMERR("Error creating IV for %s\n", ctx->ivmode); - return ret; - } - } - - return 0; -} - -static void geniv_free_tfms_aead(struct geniv_ctx *ctx) -{ - if (!ctx->tfms.tfms_aead) - return; - - if (ctx->tfms.tfms_aead[0] && IS_ERR(ctx->tfms.tfms_aead[0])) { - crypto_free_aead(ctx->tfms.tfms_aead[0]); - ctx->tfms.tfms_aead[0] = NULL; - } - - kfree(ctx->tfms.tfms_aead); - ctx->tfms.tfms_aead = NULL; -} - -static void geniv_free_tfms_skcipher(struct geniv_ctx *ctx) -{ - unsigned int i; - - if (!ctx->tfms.tfms) - return; - - for (i = 0; i < ctx->tfms_count; i++) - if (ctx->tfms.tfms[i] && IS_ERR(ctx->tfms.tfms[i])) { - crypto_free_skcipher(ctx->tfms.tfms[i]); - ctx->tfms.tfms[i] = NULL; - } - - kfree(ctx->tfms.tfms); - ctx->tfms.tfms = NULL; -} - -static void geniv_free_tfms(struct geniv_ctx *ctx) -{ - if (geniv_integrity_aead(ctx)) - geniv_free_tfms_aead(ctx); - else - geniv_free_tfms_skcipher(ctx); -} - -static int geniv_alloc_tfms_aead(struct crypto_aead *parent, - struct geniv_ctx *ctx) -{ - unsigned int reqsize, align; - - ctx->tfms.tfms_aead = kcalloc(1, sizeof(struct crypto_aead *), - GFP_KERNEL); - if (!ctx->tfms.tfms_aead) - return -ENOMEM; - - /* First instance is already allocated in geniv_init_tfm */ - ctx->tfms.tfms_aead[0] = ctx->tfm_child.tfm_aead; - - /* Setup the current cipher's request structure */ - align = crypto_aead_alignmask(parent); - align &= ~(crypto_tfm_ctx_alignment() - 1); - reqsize = align + sizeof(struct geniv_req_ctx) + - crypto_aead_reqsize(ctx->tfms.tfms_aead[0]); - - crypto_aead_set_reqsize(parent, reqsize); - - return 0; -} - -/* - * Allocate memory for the underlying cipher algorithm. Ex: cbc(aes) - */ -static int geniv_alloc_tfms_skcipher(struct crypto_skcipher *parent, - struct geniv_ctx *ctx) -{ - unsigned int i, reqsize, align, err; - - ctx->tfms.tfms = kcalloc(ctx->tfms_count, sizeof(struct crypto_skcipher *), - GFP_KERNEL); - if (!ctx->tfms.tfms) - return -ENOMEM; - - /* First instance is already allocated in geniv_init_tfm */ - ctx->tfms.tfms[0] = ctx->tfm_child.tfm; - for (i = 1; i < ctx->tfms_count; i++) { - ctx->tfms.tfms[i] = crypto_alloc_skcipher(ctx->ciphermode, 0, 0); - if (IS_ERR(ctx->tfms.tfms[i])) { - err = PTR_ERR(ctx->tfms.tfms[i]); - geniv_free_tfms(ctx); - return err; - } - - /* Setup the current cipher's request structure */ - align = crypto_skcipher_alignmask(parent); - align &= ~(crypto_tfm_ctx_alignment() - 1); - reqsize = align + sizeof(struct geniv_req_ctx) + - crypto_skcipher_reqsize(ctx->tfms.tfms[i]); - - crypto_skcipher_set_reqsize(parent, reqsize); - } - - return 0; -} - -static unsigned int geniv_authenckey_size(struct geniv_ctx *ctx) -{ - return ctx->key_size - ctx->key_extra_size + - RTA_SPACE(sizeof(struct crypto_authenc_key_param)); -} - -/* - * Initialize the cipher's context with the key, ivmode and other parameters. - * Also allocate IV generation template ciphers and initialize them. 
- */ -static int geniv_setkey_init(void *parent, struct geniv_key_info *info) -{ - struct geniv_ctx *ctx; - int ret; - - if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &info->cipher_flags)) - ctx = crypto_aead_ctx((struct crypto_aead *)parent); - else - ctx = crypto_skcipher_ctx((struct crypto_skcipher *)parent); - - ctx->tfms_count = info->tfms_count; - ctx->key = info->key; - ctx->cipher_flags = info->cipher_flags; - ctx->ivopts = info->ivopts; - ctx->iv_offset = info->iv_offset; - ctx->sector_size = info->sector_size; - ctx->sector_shift = __ffs(ctx->sector_size) - SECTOR_SHIFT; - - ctx->key_size = info->key_size; - ctx->key_parts = info->key_parts; - ctx->key_mac_size = info->key_mac_size; - ctx->on_disk_tag_size = info->on_disk_tag_size; - - if (geniv_integrity_hmac(ctx)) { - ctx->authenc_key = kmalloc(geniv_authenckey_size(ctx), GFP_KERNEL); - if (!ctx->authenc_key) - return -ENOMEM; - } - - if (geniv_integrity_aead(ctx)) - ret = geniv_alloc_tfms_aead((struct crypto_aead *)parent, ctx); - else - ret = geniv_alloc_tfms_skcipher((struct crypto_skcipher *)parent, ctx); - if (ret) - return ret; - - ret = geniv_init_iv(ctx); - - if (geniv_integrity_aead(ctx)) - ctx->integrity_tag_size = ctx->on_disk_tag_size - ctx->integrity_iv_size; - - return ret; -} - -/* - * If AEAD is composed like authenc(hmac(sha256),xts(aes)), - * the key must be for some reason in special format. - * This function converts cc->key to this special format. - */ -static void crypt_copy_authenckey(char *p, const void *key, - unsigned int enckeylen, unsigned int authkeylen) -{ - struct crypto_authenc_key_param *param; - struct rtattr *rta; - - rta = (struct rtattr *)p; - param = RTA_DATA(rta); - param->enckeylen = cpu_to_be32(enckeylen); - rta->rta_len = RTA_LENGTH(sizeof(*param)); - rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; - p += RTA_SPACE(sizeof(*param)); - memcpy(p, key + enckeylen, authkeylen); - p += authkeylen; - memcpy(p, key, enckeylen); -} - -static int geniv_setkey_tfms_aead(struct crypto_aead *parent, struct geniv_ctx *ctx, - struct geniv_key_info *info) -{ - unsigned int key_size; - unsigned int authenc_key_size; - struct crypto_aead *child_aead; - int ret = 0; - - /* Ignore extra keys (which are used for IV etc) */ - key_size = ctx->key_size - ctx->key_extra_size; - authenc_key_size = key_size + RTA_SPACE(sizeof(struct crypto_authenc_key_param)); - - child_aead = ctx->tfms.tfms_aead[0]; - crypto_aead_clear_flags(child_aead, CRYPTO_TFM_REQ_MASK); - crypto_aead_set_flags(child_aead, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - - if (geniv_integrity_hmac(ctx)) { - if (key_size < ctx->key_mac_size) - return -EINVAL; - - crypt_copy_authenckey(ctx->authenc_key, ctx->key, key_size - ctx->key_mac_size, - ctx->key_mac_size); - } - - if (geniv_integrity_hmac(ctx)) - ret = crypto_aead_setkey(child_aead, ctx->authenc_key, authenc_key_size); - else - ret = crypto_aead_setkey(child_aead, ctx->key, key_size); - if (ret) { - DMERR("Error setting key for tfms[0]\n"); - goto out; - } - - crypto_aead_set_flags(parent, crypto_aead_get_flags(child_aead) & CRYPTO_TFM_RES_MASK); - -out: - if (geniv_integrity_hmac(ctx)) - memzero_explicit(ctx->authenc_key, authenc_key_size); - - return ret; -} - -static int geniv_setkey_tfms_skcipher(struct crypto_skcipher *parent, struct geniv_ctx *ctx, - struct geniv_key_info *info) -{ - unsigned int subkey_size; - char *subkey; - struct crypto_skcipher *child; - int ret, i; - - /* Ignore extra keys (which are used for IV etc) */ - subkey_size = (ctx->key_size - ctx->key_extra_size) - >> 
ilog2(ctx->tfms_count); - - for (i = 0; i < ctx->tfms_count; i++) { - child = ctx->tfms.tfms[i]; - crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_skcipher_set_flags(child, - crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); - - subkey = ctx->key + (subkey_size) * i; - - ret = crypto_skcipher_setkey(child, subkey, subkey_size); - if (ret) { - DMERR("Error setting key for tfms[%d]\n", i); - return ret; - } - - crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - } - - return 0; -} - -static int geniv_setkey_set(struct geniv_ctx *ctx) -{ - if (ctx->iv_gen_ops && ctx->iv_gen_ops->init) - return ctx->iv_gen_ops->init(ctx); - else - return 0; -} - -static int geniv_setkey_wipe(struct geniv_ctx *ctx) -{ - int ret; - - if (ctx->iv_gen_ops && ctx->iv_gen_ops->wipe) { - ret = ctx->iv_gen_ops->wipe(ctx); - if (ret) - return ret; - } - - if (geniv_integrity_hmac(ctx)) - kzfree(ctx->authenc_key); - - return 0; -} - -static int geniv_setkey(void *parent, const u8 *key, unsigned int keylen) -{ - int err = 0; - struct geniv_ctx *ctx; - struct geniv_key_info *info = (struct geniv_key_info *) key; - - if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &info->cipher_flags)) - ctx = crypto_aead_ctx((struct crypto_aead *)parent); - else - ctx = crypto_skcipher_ctx((struct crypto_skcipher *)parent); - - DMDEBUG("SETKEY Operation : %d\n", info->keyop); - - switch (info->keyop) { - case SETKEY_OP_INIT: - err = geniv_setkey_init(parent, info); - break; - case SETKEY_OP_SET: - err = geniv_setkey_set(ctx); - break; - case SETKEY_OP_WIPE: - err = geniv_setkey_wipe(ctx); - break; - } - - if (err) - return err; - - if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &info->cipher_flags)) - return geniv_setkey_tfms_aead((struct crypto_aead *)parent, ctx, info); - else - return geniv_setkey_tfms_skcipher((struct crypto_skcipher *)parent, ctx, info); -} - -static int geniv_aead_setkey(struct crypto_aead *parent, - const u8 *key, unsigned int keylen) -{ - return geniv_setkey(parent, key, keylen); -} - -static int geniv_skcipher_setkey(struct crypto_skcipher *parent, - const u8 *key, unsigned int keylen) -{ - return geniv_setkey(parent, key, keylen); -} - -static void geniv_async_done(struct crypto_async_request *async_req, int error); - -static int geniv_alloc_subreq_aead(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - u32 req_flags) -{ - struct aead_request *req; - - if (!rctx->subreq) { - rctx->subreq = mempool_alloc(ctx->subreq_pool, GFP_NOIO); - if (!rctx->subreq) - return -ENOMEM; - } - - req = &rctx->subreq->r.req_aead; - rctx->subreq->rctx = rctx; - - aead_request_set_tfm(req, ctx->tfms.tfms_aead[0]); - aead_request_set_callback(req, req_flags, - geniv_async_done, rctx->subreq); - - return 0; -} - -/* req_flags: flags from parent request */ -static int geniv_alloc_subreq_skcipher(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - u32 req_flags) -{ - int key_index; - struct skcipher_request *req; - - if (!rctx->subreq) { - rctx->subreq = mempool_alloc(ctx->subreq_pool, GFP_NOIO); - if (!rctx->subreq) - return -ENOMEM; - } - - req = &rctx->subreq->r.req; - rctx->subreq->rctx = rctx; - - key_index = rctx->cc_sector & (ctx->tfms_count - 1); - - skcipher_request_set_tfm(req, ctx->tfms.tfms[key_index]); - skcipher_request_set_callback(req, req_flags, - geniv_async_done, rctx->subreq); - - return 0; -} - -/* - * Asynchronous IO completion callback for each sector in a segment. When all - * pending i/o are completed the parent cipher's async function is called. 
- */ -static void geniv_async_done(struct crypto_async_request *async_req, int error) -{ - struct geniv_subreq *subreq = async_req->data; - struct geniv_req_ctx *rctx = subreq->rctx; - struct skcipher_request *req = NULL; - struct aead_request *req_aead = NULL; - struct geniv_ctx *ctx; - u8 *iv; - - if (!rctx->is_aead_request) { - req = rctx->r.req; - ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - } else { - req_aead = rctx->r.req_aead; - ctx = crypto_aead_ctx(crypto_aead_reqtfm(req_aead)); - } - - /* - * A request from crypto driver backlog is going to be processed now, - * finish the completion and continue in crypt_convert(). - * (Callback will be called for the second time for this request.) - */ - if (error == -EINPROGRESS) { - complete(&rctx->restart); - return; - } - - iv = iv_of_subreq(ctx, subreq); - if (!error && ctx->iv_gen_ops && ctx->iv_gen_ops->post) - error = ctx->iv_gen_ops->post(ctx, rctx, subreq, iv); - - mempool_free(subreq, ctx->subreq_pool); - - /* - * req_pending needs to be checked before req->base.complete is called - * as we need 'req_pending' to be equal to 1 to ensure all subrequests - * are processed. - */ - if (atomic_dec_and_test(&rctx->req_pending)) { - /* Call the parent cipher's completion function */ - if (!rctx->is_aead_request) - skcipher_request_complete(req, error); - else - aead_request_complete(req_aead, error); - - } -} - -static unsigned int geniv_get_sectors(struct scatterlist *sg1, - struct scatterlist *sg2, - unsigned int segments) -{ - unsigned int i, n1, n2; - - n1 = n2 = 0; - for (i = 0; i < segments ; i++) { - n1 += sg1[i].length >> SECTOR_SHIFT; - n1 += (sg1[i].length & SECTOR_MASK) ? 1 : 0; - } - - for (i = 0; i < segments ; i++) { - n2 += sg2[i].length >> SECTOR_SHIFT; - n2 += (sg2[i].length & SECTOR_MASK) ? 1 : 0; - } - - return max(n1, n2); -} - -/* - * Iterate scatterlist of segments to retrieve the 512-byte sectors so that - * unique IVs could be generated for each 512-byte sector. This split may not - * be necessary e.g. when these ciphers are modelled in hardware, where it can - * make use of the hardware's IV generation capabilities. - */ -static int geniv_iter_block(void *req_in, - struct geniv_ctx *ctx, struct geniv_req_ctx *rctx) - -{ - unsigned int rem; - struct scatterlist *src_org, *dst_org; - struct scatterlist *src1, *dst1; - struct scatterlist_iter *iter = &rctx->iter; - - if (unlikely(iter->seg_no >= rctx->nents)) - return 0; - - if (geniv_integrity_aead(ctx)) { - struct aead_request *req_aead = (struct aead_request *)req_in; - src_org = &req_aead->src[0]; - dst_org = &req_aead->dst[0]; - } else { - struct skcipher_request *req = (struct skcipher_request *)req_in; - src_org = &req->src[0]; - dst_org = &req->dst[0]; - } - - src1 = &src_org[iter->seg_no]; - dst1 = &dst_org[iter->seg_no]; - iter->done += iter->len; - - if (iter->done >= src1->length) { - iter->seg_no++; - - if (iter->seg_no >= rctx->nents) - return 0; - - src1 = &src_org[iter->seg_no]; - dst1 = &dst_org[iter->seg_no]; - iter->done = 0; - } - - rem = src1->length - iter->done; - - iter->len = rem > ctx->sector_size ? 
ctx->sector_size : rem; - - DMDEBUG("segment:(%d/%u), done:%d, rem:%d\n", - iter->seg_no, rctx->nents, iter->done, rem); - - return iter->len; -} - -static u8 *org_iv_of_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq) -{ - return iv_of_subreq(ctx, subreq) + ctx->iv_size; -} - -static uint64_t *org_sector_of_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq) -{ - u8 *ptr = iv_of_subreq(ctx, subreq) + ctx->iv_size + ctx->iv_size; - - return (uint64_t *) ptr; -} - -static unsigned int *org_tag_of_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq) -{ - u8 *ptr = iv_of_subreq(ctx, subreq) + ctx->iv_size + - ctx->iv_size + sizeof(uint64_t); - - return (unsigned int *)ptr; -} - -static void *tag_from_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq) -{ - return &subreq->rctx->integrity_metadata[*org_tag_of_subreq(ctx, subreq) * - ctx->on_disk_tag_size]; -} - -static void *iv_tag_from_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq) -{ - return tag_from_subreq(ctx, subreq) + ctx->integrity_tag_size; -} - -static int geniv_convert_block_aead(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, - unsigned int tag_offset) -{ - struct scatterlist *sg_in, *sg_out; - u8 *iv, *org_iv, *tag_iv, *tag; - uint64_t *sector; - int r = 0; - struct scatterlist_iter *iter = &rctx->iter; - struct aead_request *req_aead; - struct aead_request *parent_req = rctx->r.req_aead; - - BUG_ON(ctx->integrity_iv_size && ctx->integrity_iv_size != ctx->iv_size); - - /* Reject unexpected unaligned bio. */ - if (unlikely(iter->len & (ctx->sector_size - 1))) - return -EIO; - - subreq->iv_sector = rctx->cc_sector; - if (test_bit(CRYPT_IV_LARGE_SECTORS, &ctx->cipher_flags)) - subreq->iv_sector >>= ctx->sector_shift; - - *org_tag_of_subreq(ctx, subreq) = tag_offset; - - sector = org_sector_of_subreq(ctx, subreq); - *sector = cpu_to_le64(rctx->cc_sector - ctx->iv_offset); - - iv = iv_of_subreq(ctx, subreq); - org_iv = org_iv_of_subreq(ctx, subreq); - tag = tag_from_subreq(ctx, subreq); - tag_iv = iv_tag_from_subreq(ctx, subreq); - - sg_in = subreq->sg_in; - sg_out = subreq->sg_out; - - /* - * AEAD request: - * |----- AAD -------|------ DATA -------|-- AUTH TAG --| - * | (authenticated) | (auth+encryption) | | - * | sector_LE | IV | sector in/out | tag in/out | - */ - sg_init_table(sg_in, 4); - sg_set_buf(&sg_in[0], sector, sizeof(uint64_t)); - sg_set_buf(&sg_in[1], org_iv, ctx->iv_size); - sg_set_page(&sg_in[2], sg_page(&parent_req->src[iter->seg_no]), - iter->len, parent_req->src[iter->seg_no].offset + iter->done); - sg_set_buf(&sg_in[3], tag, ctx->integrity_tag_size); - - sg_init_table(sg_out, 4); - sg_set_buf(&sg_out[0], sector, sizeof(uint64_t)); - sg_set_buf(&sg_out[1], org_iv, ctx->iv_size); - sg_set_page(&sg_out[2], sg_page(&parent_req->dst[iter->seg_no]), - iter->len, parent_req->dst[iter->seg_no].offset + iter->done); - sg_set_buf(&sg_out[3], tag, ctx->integrity_tag_size); - - if (ctx->iv_gen_ops) { - /* For READs use IV stored in integrity metadata */ - if (ctx->integrity_iv_size && !rctx->is_write) { - memcpy(org_iv, tag_iv, ctx->iv_size); - } else { - r = ctx->iv_gen_ops->generator(ctx, rctx, subreq, org_iv); - if (r < 0) - return r; - /* Store generated IV in integrity metadata */ - if (ctx->integrity_iv_size) - memcpy(tag_iv, org_iv, ctx->iv_size); - } - /* Working copy of IV, to be modified in crypto API */ - memcpy(iv, org_iv, ctx->iv_size); - } - - req_aead = &subreq->r.req_aead; - aead_request_set_ad(req_aead, sizeof(uint64_t) + 
ctx->iv_size); - if (rctx->is_write) { - aead_request_set_crypt(req_aead, subreq->sg_in, subreq->sg_out, - ctx->sector_size, iv); - r = crypto_aead_encrypt(req_aead); - if (ctx->integrity_tag_size + ctx->integrity_iv_size != ctx->on_disk_tag_size) - memset(tag + ctx->integrity_tag_size + ctx->integrity_iv_size, 0, - ctx->on_disk_tag_size - (ctx->integrity_tag_size + ctx->integrity_iv_size)); - } else { - aead_request_set_crypt(req_aead, subreq->sg_in, subreq->sg_out, - ctx->sector_size + ctx->integrity_tag_size, iv); - r = crypto_aead_decrypt(req_aead); - } - - if (r == -EBADMSG) - DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", - (unsigned long long)le64_to_cpu(*sector)); - - if (!r && ctx->iv_gen_ops && ctx->iv_gen_ops->post) - r = ctx->iv_gen_ops->post(ctx, rctx, subreq, org_iv); - - return r; -} - -static int geniv_convert_block_skcipher(struct geniv_ctx *ctx, - struct geniv_req_ctx *rctx, - struct geniv_subreq *subreq, - unsigned int tag_offset) -{ - struct scatterlist *sg_in, *sg_out; - u8 *iv, *org_iv, *tag_iv; - uint64_t *sector; - int r = 0; - struct scatterlist_iter *iter = &rctx->iter; - struct skcipher_request *req; - struct skcipher_request *parent_req = rctx->r.req; - - /* Reject unexpected unaligned bio. */ - if (unlikely(iter->len & (ctx->sector_size - 1))) - return -EIO; - - subreq->iv_sector = rctx->cc_sector; - if (test_bit(CRYPT_IV_LARGE_SECTORS, &ctx->cipher_flags)) - subreq->iv_sector >>= ctx->sector_shift; - - *org_tag_of_subreq(ctx, subreq) = tag_offset; - - iv = iv_of_subreq(ctx, subreq); - org_iv = org_iv_of_subreq(ctx, subreq); - tag_iv = iv_tag_from_subreq(ctx, subreq); - - sector = org_sector_of_subreq(ctx, subreq); - *sector = cpu_to_le64(rctx->cc_sector - ctx->iv_offset); - - /* For skcipher we use only the first sg item */ - sg_in = subreq->sg_in; - sg_out = subreq->sg_out; - - sg_init_table(sg_in, 1); - sg_set_page(sg_in, sg_page(&parent_req->src[iter->seg_no]), - iter->len, parent_req->src[iter->seg_no].offset + iter->done); - - sg_init_table(sg_out, 1); - sg_set_page(sg_out, sg_page(&parent_req->dst[iter->seg_no]), - iter->len, parent_req->dst[iter->seg_no].offset + iter->done); - - if (ctx->iv_gen_ops) { - /* For READs use IV stored in integrity metadata */ - if (ctx->integrity_iv_size && !rctx->is_write) { - memcpy(org_iv, tag_iv, ctx->integrity_iv_size); - } else { - r = ctx->iv_gen_ops->generator(ctx, rctx, subreq, org_iv); - if (r < 0) - return r; - /* Store generated IV in integrity metadata */ - if (ctx->integrity_iv_size) - memcpy(tag_iv, org_iv, ctx->integrity_iv_size); - } - /* Working copy of IV, to be modified in crypto API */ - memcpy(iv, org_iv, ctx->iv_size); - } - - req = &subreq->r.req; - skcipher_request_set_crypt(req, sg_in, sg_out, ctx->sector_size, iv); - - if (rctx->is_write) - r = crypto_skcipher_encrypt(req); - else - r = crypto_skcipher_decrypt(req); - - if (!r && ctx->iv_gen_ops && ctx->iv_gen_ops->post) - r = ctx->iv_gen_ops->post(ctx, rctx, subreq, org_iv); - - return r; -} - -/* - * Common encryt/decrypt function for geniv template cipher. Before the crypto - * operation, it splits the memory segments (in the scatterlist) into 512 byte - * sectors. The initialization vector(IV) used is based on a unique sector - * number which is generated here. 
- */ -static int geniv_crypt(struct geniv_ctx *ctx, void *parent_req, bool is_encrypt) -{ - struct skcipher_request *req = NULL; - struct aead_request *req_aead = NULL; - struct geniv_req_ctx *rctx; - struct geniv_req_info *rinfo; - int i, bytes, cryptlen, ret = 0; - unsigned int sectors; - unsigned int tag_offset = 0; - unsigned int sector_step = ctx->sector_size >> SECTOR_SHIFT; - char *str __maybe_unused = is_encrypt ? "encrypt" : "decrypt"; - - if (geniv_integrity_aead(ctx)) { - req_aead = (struct aead_request *)parent_req; - rctx = geniv_aead_req_ctx(req_aead); - rctx->r.req_aead = req_aead; - rinfo = (struct geniv_req_info *)req_aead->iv; - } else { - req = (struct skcipher_request *)parent_req; - rctx = geniv_skcipher_req_ctx(req); - rctx->r.req = req; - rinfo = (struct geniv_req_info *)req->iv; - } - - /* Instance of 'struct geniv_req_info' is stored in IV ptr */ - rctx->is_write = is_encrypt; - rctx->is_aead_request = geniv_integrity_aead(ctx); - rctx->cc_sector = rinfo->cc_sector; - rctx->nents = rinfo->nents; - rctx->integrity_metadata = rinfo->integrity_metadata; - rctx->subreq = NULL; - cryptlen = req->cryptlen; - - rctx->iter.seg_no = 0; - rctx->iter.done = 0; - rctx->iter.len = 0; - - DMDEBUG("geniv:%s: starting sector=%d, #segments=%u\n", str, - (unsigned int)rctx->cc_sector, rctx->nents); - - if (geniv_integrity_aead(ctx)) - sectors = geniv_get_sectors(req_aead->src, req_aead->dst, rctx->nents); - else - sectors = geniv_get_sectors(req->src, req->dst, rctx->nents); - - init_completion(&rctx->restart); - atomic_set(&rctx->req_pending, 1); - - for (i = 0; i < sectors; i++) { - struct geniv_subreq *subreq; - - if (geniv_integrity_aead(ctx)) - ret = geniv_alloc_subreq_aead(ctx, rctx, req_aead->base.flags); - else - ret = geniv_alloc_subreq_skcipher(ctx, rctx, req->base.flags); - if (ret) - return -ENOMEM; - - subreq = rctx->subreq; - - if (geniv_integrity_aead(ctx)) - bytes = geniv_iter_block(req_aead, ctx, rctx); - else - bytes = geniv_iter_block(req, ctx, rctx); - - if (bytes == 0) - break; - - cryptlen -= bytes; - atomic_inc(&rctx->req_pending); - - if (geniv_integrity_aead(ctx)) - ret = geniv_convert_block_aead(ctx, rctx, subreq, tag_offset); - else - ret = geniv_convert_block_skcipher(ctx, rctx, subreq, tag_offset); - - switch (ret) { - /* - * The request was queued by a crypto driver - * but the driver request queue is full, let's wait. - */ - case -EBUSY: - wait_for_completion(&rctx->restart); - reinit_completion(&rctx->restart); - /* fall through */ - /* - * The request is queued and processed asynchronously, - * completion function geniv_async_done() is called. - */ - case -EINPROGRESS: - /* Marking this NULL lets the creation of a new sub- - * request when 'geniv_alloc_subreq' is called. - */ - rctx->subreq = NULL; - rctx->cc_sector += sector_step; - tag_offset++; - cond_resched(); - break; - /* - * The request was already processed (synchronously). - */ - case 0: - atomic_dec(&rctx->req_pending); - rctx->cc_sector += sector_step; - tag_offset++; - cond_resched(); - continue; - - /* There was an error while processing the request. 
*/ - default: - atomic_dec(&rctx->req_pending); - mempool_free(rctx->subreq, ctx->subreq_pool); - atomic_dec(&rctx->req_pending); - return ret; - } - } - - if (rctx->subreq) - mempool_free(rctx->subreq, ctx->subreq_pool); - - if (atomic_dec_and_test(&rctx->req_pending)) - return 0; - else - return -EINPROGRESS; -} - -static int geniv_skcipher_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct geniv_ctx *ctx = crypto_skcipher_ctx(tfm); - - return geniv_crypt(ctx, req, true); -} - -static int geniv_skcipher_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct geniv_ctx *ctx = crypto_skcipher_ctx(tfm); - - return geniv_crypt(ctx, req, false); -} - -static int geniv_aead_encrypt(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct geniv_ctx *ctx = crypto_aead_ctx(tfm); - - return geniv_crypt(ctx, req, true); -} - -static int geniv_aead_decrypt(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct geniv_ctx *ctx = crypto_aead_ctx(tfm); - - return geniv_crypt(ctx, req, false); -} - -/* - * Workaround to parse cipher algorithm from crypto API spec. - * The ctx->cipher is currently used only in ESSIV. - * This should be probably done by crypto-api calls (once available...) - */ -static int geniv_blkdev_cipher(struct geniv_ctx *ctx, bool is_crypto_aead) -{ - const char *alg_name = NULL; - char *start, *end; - - alg_name = ctx->ciphermode; - if (!alg_name) - return -EINVAL; - - if (is_crypto_aead) { - alg_name = strchr(alg_name, ','); - if (!alg_name) - alg_name = ctx->ciphermode; - alg_name++; - } - - start = strchr(alg_name, '('); - end = strchr(alg_name, ')'); - - if (!start && !end) { - ctx->cipher = kstrdup(alg_name, GFP_KERNEL); - return ctx->cipher ? 
0 : -ENOMEM; - } - - if (!start || !end || ++start >= end) - return -EINVAL; - - ctx->cipher = kzalloc(end - start + 1, GFP_KERNEL); - if (!ctx->cipher) - return -ENOMEM; - - strncpy(ctx->cipher, start, end - start); - - return 0; -} - -static int geniv_init_tfm(void *tfm_tmp, bool is_crypto_aead) -{ - struct geniv_ctx *ctx; - struct crypto_skcipher *tfm; - struct crypto_aead *tfm_aead; - unsigned int reqsize; - size_t iv_size_padding; - char *algname; - int psize, ret; - - if (is_crypto_aead) { - tfm_aead = (struct crypto_aead *)tfm_tmp; - ctx = crypto_aead_ctx(tfm_aead); - algname = (char *) crypto_tfm_alg_name(crypto_aead_tfm(tfm_aead)); - } else { - tfm = (struct crypto_skcipher *)tfm_tmp; - ctx = crypto_skcipher_ctx(tfm); - algname = (char *) crypto_tfm_alg_name(crypto_skcipher_tfm(tfm)); - } - - ctx->ciphermode = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL); - if (!ctx->ciphermode) - return -ENOMEM; - - ctx->algname = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL); - if (!ctx->algname) { - ret = -ENOMEM; - goto free_ciphermode; - } - - strlcpy(ctx->algname, algname, CRYPTO_MAX_ALG_NAME); - algname = ctx->algname; - - /* Parse the algorithm name 'ivmode(ciphermode)' */ - ctx->ivmode = strsep(&algname, "("); - strlcpy(ctx->ciphermode, algname, CRYPTO_MAX_ALG_NAME); - ctx->ciphermode[strlen(algname) - 1] = '\0'; - - DMDEBUG("ciphermode=%s, ivmode=%s\n", ctx->ciphermode, ctx->ivmode); - - /* - * Usually the underlying cipher instances are spawned here, but since - * the value of tfms_count (which is equal to the key_count) is not - * known yet, create only one instance and delay the creation of the - * rest of the instances of the underlying cipher 'cbc(aes)' until - * the setkey operation is invoked. - * The first instance created i.e. ctx->child will later be assigned as - * the 1st element in the array ctx->tfms. Creation of atleast one - * instance of the cipher is necessary to be created here to uncover - * any errors earlier than during the setkey operation later where the - * remaining instances are created. - */ - if (is_crypto_aead) - ctx->tfm_child.tfm_aead = crypto_alloc_aead(ctx->ciphermode, 0, 0); - else - ctx->tfm_child.tfm = crypto_alloc_skcipher(ctx->ciphermode, 0, 0); - if (IS_ERR(ctx->tfm_child.tfm)) { - ret = PTR_ERR(ctx->tfm_child.tfm); - DMERR("Failed to create cipher %s. 
err %d\n", - ctx->ciphermode, ret); - goto free_algname; - } - - /* Setup the current cipher's request structure */ - if (is_crypto_aead) { - reqsize = sizeof(struct geniv_req_ctx) + __alignof__(struct geniv_req_ctx); - crypto_aead_set_reqsize(tfm_aead, reqsize); - - ctx->iv_start = sizeof(struct geniv_subreq); - ctx->iv_start += crypto_aead_reqsize(ctx->tfm_child.tfm_aead); - - ctx->iv_size = crypto_aead_ivsize(tfm_aead); - } else { - reqsize = sizeof(struct geniv_req_ctx) + __alignof__(struct geniv_req_ctx); - crypto_skcipher_set_reqsize(tfm, reqsize); - - ctx->iv_start = sizeof(struct geniv_subreq); - ctx->iv_start += crypto_skcipher_reqsize(ctx->tfm_child.tfm); - - ctx->iv_size = crypto_skcipher_ivsize(tfm); - } - /* at least a 64 bit sector number should fit in our buffer */ - if (ctx->iv_size) - ctx->iv_size = max(ctx->iv_size, - (unsigned int)(sizeof(u64) / sizeof(u8))); - - if (is_crypto_aead) { - if (crypto_aead_alignmask(tfm_aead) < CRYPTO_MINALIGN) { - /* Allocate the padding exactly */ - iv_size_padding = -ctx->iv_start - & crypto_aead_alignmask(ctx->tfm_child.tfm_aead); - } else { - /* - * If the cipher requires greater alignment than kmalloc - * alignment, we don't know the exact position of the - * initialization vector. We must assume worst case. - */ - iv_size_padding = crypto_aead_alignmask(ctx->tfm_child.tfm_aead); - } - } else { - if (crypto_skcipher_alignmask(tfm) < CRYPTO_MINALIGN) { - iv_size_padding = -ctx->iv_start - & crypto_skcipher_alignmask(ctx->tfm_child.tfm); - } else { - iv_size_padding = crypto_skcipher_alignmask(ctx->tfm_child.tfm); - } - } - - /* - * create memory pool for sub-request structure - * ...| IV + padding | original IV | original sec. number | bio tag offset | - */ - psize = ctx->iv_start + iv_size_padding + ctx->iv_size + ctx->iv_size + - sizeof(uint64_t) + sizeof(unsigned int); - - ctx->subreq_pool = mempool_create_kmalloc_pool(MIN_IOS, psize); - if (!ctx->subreq_pool) { - ret = -ENOMEM; - DMERR("Could not allocate crypt sub-request mempool\n"); - goto free_tfm; - } - - ret = geniv_blkdev_cipher(ctx, is_crypto_aead); - if (ret < 0) { - ret = -ENOMEM; - DMERR("Cannot allocate cipher string\n"); - goto free_tfm; - } + __cpu_to_le32s(&md5state.hash[i]); + memcpy(iv, &md5state.hash, cc->iv_size);
return 0; - -free_tfm: - if (is_crypto_aead) - crypto_free_aead(ctx->tfm_child.tfm_aead); - else - crypto_free_skcipher(ctx->tfm_child.tfm); -free_algname: - kfree(ctx->algname); -free_ciphermode: - kfree(ctx->ciphermode); - return ret; }
-static int geniv_skcipher_init_tfm(struct crypto_skcipher *tfm) +static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { - return geniv_init_tfm(tfm, 0); -} + struct scatterlist *sg; + u8 *src; + int r = 0;
-static int geniv_aead_init_tfm(struct crypto_aead *tfm) -{ - return geniv_init_tfm(tfm, 1); -} + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { + sg = crypt_get_sg_data(cc, dmreq->sg_in); + src = kmap_atomic(sg_page(sg)); + r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); + kunmap_atomic(src); + } else + memset(iv, 0, cc->iv_size);
-static void geniv_exit_tfm(struct geniv_ctx *ctx) -{ - if (ctx->iv_gen_ops && ctx->iv_gen_ops->dtr) - ctx->iv_gen_ops->dtr(ctx); - - mempool_destroy(ctx->subreq_pool); - geniv_free_tfms(ctx); - kzfree(ctx->ciphermode); - kzfree(ctx->algname); - kzfree(ctx->cipher); + return r; }
-static void geniv_skcipher_exit_tfm(struct crypto_skcipher *tfm) +static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { - struct geniv_ctx *ctx = crypto_skcipher_ctx(tfm); - - geniv_exit_tfm(ctx); -} + struct scatterlist *sg; + u8 *dst; + int r;
-static void geniv_aead_exit_tfm(struct crypto_aead *tfm) -{ - struct geniv_ctx *ctx = crypto_aead_ctx(tfm); + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) + return 0;
- geniv_exit_tfm(ctx); -} + sg = crypt_get_sg_data(cc, dmreq->sg_out); + dst = kmap_atomic(sg_page(sg)); + r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
-static void geniv_skcipher_free(struct skcipher_instance *inst) -{ - struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); + /* Tweak the first block of plaintext sector */ + if (!r) + crypto_xor(dst + sg->offset, iv, cc->iv_size);
- crypto_drop_skcipher(spawn); - kfree(inst); + kunmap_atomic(dst); + return r; }
-static void geniv_aead_free(struct aead_instance *inst) +static void crypt_iv_tcw_dtr(struct crypt_config *cc) { - struct crypto_aead_spawn *spawn = aead_instance_ctx(inst); + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + + kzfree(tcw->iv_seed); + tcw->iv_seed = NULL; + kzfree(tcw->whitening); + tcw->whitening = NULL;
- crypto_drop_aead(spawn); - kfree(inst); + if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm)) + crypto_free_shash(tcw->crc32_tfm); + tcw->crc32_tfm = NULL; }
-static int geniv_skcipher_create(struct crypto_template *tmpl, - struct rtattr **tb, char *algname) +static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) { - struct crypto_attr_type *algt; - struct skcipher_instance *inst; - struct skcipher_alg *alg; - struct crypto_skcipher_spawn *spawn; - const char *cipher_name; - int err; + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
- algt = crypto_get_attr_type(tb); + if (cc->sector_size != (1 << SECTOR_SHIFT)) { + ti->error = "Unsupported sector size for TCW"; + return -EINVAL; + }
- cipher_name = crypto_attr_alg_name(tb[1]); + if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) { + ti->error = "Wrong key size for TCW"; + return -EINVAL; + }
- if (IS_ERR(cipher_name)) - return PTR_ERR(cipher_name); + tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0); + if (IS_ERR(tcw->crc32_tfm)) { + ti->error = "Error initializing CRC32 in TCW"; + return PTR_ERR(tcw->crc32_tfm); + }
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) + tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL); + tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL); + if (!tcw->iv_seed || !tcw->whitening) { + crypt_iv_tcw_dtr(cc); + ti->error = "Error allocating seed storage in TCW"; return -ENOMEM; + }
- spawn = skcipher_instance_ctx(inst); - - crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); - err = crypto_grab_skcipher(spawn, cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); - - if (err) - goto err_free_inst; - - alg = crypto_spawn_skcipher_alg(spawn); + return 0; +}
- err = -EINVAL; +static int crypt_iv_tcw_init(struct crypt_config *cc) +{ + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
- /* Only support blocks of size which is of a power of 2 */ - if (!is_power_of_2(alg->base.cra_blocksize)) - goto err_drop_spawn; + memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size); + memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size], + TCW_WHITENING_SIZE);
- /* algname: essiv, base.cra_name: cbc(aes) */ - err = -ENAMETOOLONG; - if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", - algname, alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) - goto err_drop_spawn; - if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "%s(%s)", algname, alg->base.cra_driver_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_drop_spawn; + return 0; +}
- inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.base.cra_priority = alg->base.cra_priority; - inst->alg.base.cra_blocksize = alg->base.cra_blocksize; - inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.ivsize = alg->base.cra_blocksize; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = sizeof(struct geniv_key_info); - inst->alg.max_keysize = sizeof(struct geniv_key_info); +static int crypt_iv_tcw_wipe(struct crypt_config *cc) +{ + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
- inst->alg.setkey = geniv_skcipher_setkey; - inst->alg.encrypt = geniv_skcipher_encrypt; - inst->alg.decrypt = geniv_skcipher_decrypt; + memset(tcw->iv_seed, 0, cc->iv_size); + memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
- inst->alg.base.cra_ctxsize = sizeof(struct geniv_ctx); + return 0; +}
- inst->alg.init = geniv_skcipher_init_tfm; - inst->alg.exit = geniv_skcipher_exit_tfm; +static int crypt_iv_tcw_whitening(struct crypt_config *cc, + struct dm_crypt_request *dmreq, + u8 *data) +{ + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + __le64 sector = cpu_to_le64(dmreq->iv_sector); + u8 buf[TCW_WHITENING_SIZE]; + SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm); + int i, r;
- inst->free = geniv_skcipher_free; + /* xor whitening with sector number */ + crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8); + crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
- err = skcipher_register_instance(tmpl, inst); - if (err) - goto err_drop_spawn; + /* calculate crc32 for every 32bit part and xor it */ + desc->tfm = tcw->crc32_tfm; + desc->flags = 0; + for (i = 0; i < 4; i++) { + r = crypto_shash_init(desc); + if (r) + goto out; + r = crypto_shash_update(desc, &buf[i * 4], 4); + if (r) + goto out; + r = crypto_shash_final(desc, &buf[i * 4]); + if (r) + goto out; + } + crypto_xor(&buf[0], &buf[12], 4); + crypto_xor(&buf[4], &buf[8], 4);
+ /* apply whitening (8 bytes) to whole sector */ + for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++) + crypto_xor(data + i * 8, buf, 8); out: - return err; - -err_drop_spawn: - crypto_drop_skcipher(spawn); -err_free_inst: - kfree(inst); - goto out; + memzero_explicit(buf, sizeof(buf)); + return r; }
- -static int geniv_aead_create(struct crypto_template *tmpl, - struct rtattr **tb, char *algname) +static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { - struct crypto_attr_type *algt; - struct aead_instance *inst; - struct aead_alg *alg; - struct crypto_aead_spawn *spawn; - const char *cipher_name; - int err; - - algt = crypto_get_attr_type(tb); - - cipher_name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(cipher_name)) - return PTR_ERR(cipher_name); - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; + struct scatterlist *sg; + struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + __le64 sector = cpu_to_le64(dmreq->iv_sector); + u8 *src; + int r = 0;
- spawn = aead_instance_ctx(inst); + /* Remove whitening from ciphertext */ + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { + sg = crypt_get_sg_data(cc, dmreq->sg_in); + src = kmap_atomic(sg_page(sg)); + r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset); + kunmap_atomic(src); + }
- crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); - err = crypto_grab_aead(spawn, cipher_name, 0, - crypto_requires_sync(algt->type, - algt->mask)); - if (err) - goto err_free_inst; + /* Calculate IV */ + crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8); + if (cc->iv_size > 8) + crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector, + cc->iv_size - 8);
- alg = crypto_spawn_aead_alg(spawn); + return r; +}
- /* Only support blocks of size which is of a power of 2 */ - if (!is_power_of_2(alg->base.cra_blocksize)) { - err = -EINVAL; - goto err_drop_spawn; - } +static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) +{ + struct scatterlist *sg; + u8 *dst; + int r;
- /* algname: essiv, base.cra_name: cbc(aes) */ - if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", - algname, alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) { - err = -ENAMETOOLONG; - goto err_drop_spawn; - } + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) + return 0;
- if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "%s(%s)", algname, alg->base.cra_driver_name) >= - CRYPTO_MAX_ALG_NAME) { - err = -ENAMETOOLONG; - goto err_drop_spawn; - } + /* Apply whitening on ciphertext */ + sg = crypt_get_sg_data(cc, dmreq->sg_out); + dst = kmap_atomic(sg_page(sg)); + r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset); + kunmap_atomic(dst);
- inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.base.cra_priority = alg->base.cra_priority; - inst->alg.base.cra_blocksize = alg->base.cra_blocksize; - inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.ivsize = crypto_aead_alg_ivsize(alg); - inst->alg.chunksize = crypto_aead_alg_chunksize(alg); - inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); + return r; +}
- inst->alg.setkey = geniv_aead_setkey; - inst->alg.encrypt = geniv_aead_encrypt; - inst->alg.decrypt = geniv_aead_decrypt; +static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) +{ + /* Used only for writes, there must be an additional space to store IV */ + get_random_bytes(iv, cc->iv_size); + return 0; +}
- inst->alg.base.cra_ctxsize = sizeof(struct geniv_ctx); +static const struct crypt_iv_operations crypt_iv_plain_ops = { + .generator = crypt_iv_plain_gen +};
- inst->alg.init = geniv_aead_init_tfm; - inst->alg.exit = geniv_aead_exit_tfm; +static const struct crypt_iv_operations crypt_iv_plain64_ops = { + .generator = crypt_iv_plain64_gen +};
- inst->free = geniv_aead_free; +static const struct crypt_iv_operations crypt_iv_plain64be_ops = { + .generator = crypt_iv_plain64be_gen +};
- err = aead_register_instance(tmpl, inst); - if (err) - goto err_drop_spawn; +static const struct crypt_iv_operations crypt_iv_essiv_ops = { + .ctr = crypt_iv_essiv_ctr, + .dtr = crypt_iv_essiv_dtr, + .init = crypt_iv_essiv_init, + .wipe = crypt_iv_essiv_wipe, + .generator = crypt_iv_essiv_gen +};
- return 0; +static const struct crypt_iv_operations crypt_iv_benbi_ops = { + .ctr = crypt_iv_benbi_ctr, + .dtr = crypt_iv_benbi_dtr, + .generator = crypt_iv_benbi_gen +};
-err_drop_spawn: - crypto_drop_aead(spawn); -err_free_inst: - kfree(inst); - return err; -} +static const struct crypt_iv_operations crypt_iv_null_ops = { + .generator = crypt_iv_null_gen +};
-static int geniv_create(struct crypto_template *tmpl, - struct rtattr **tb, char *algname) -{ - if (!crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER)) - return geniv_skcipher_create(tmpl, tb, algname); - else if (!crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD)) - return geniv_aead_create(tmpl, tb, algname); - else - return -EINVAL; -} +static const struct crypt_iv_operations crypt_iv_lmk_ops = { + .ctr = crypt_iv_lmk_ctr, + .dtr = crypt_iv_lmk_dtr, + .init = crypt_iv_lmk_init, + .wipe = crypt_iv_lmk_wipe, + .generator = crypt_iv_lmk_gen, + .post = crypt_iv_lmk_post +};
-static int geniv_template_create(struct crypto_template *tmpl, - struct rtattr **tb) -{ - return geniv_create(tmpl, tb, tmpl->name); -} +static const struct crypt_iv_operations crypt_iv_tcw_ops = { + .ctr = crypt_iv_tcw_ctr, + .dtr = crypt_iv_tcw_dtr, + .init = crypt_iv_tcw_init, + .wipe = crypt_iv_tcw_wipe, + .generator = crypt_iv_tcw_gen, + .post = crypt_iv_tcw_post +};
-#define DECLARE_CRYPTO_TEMPLATE(type) \ - { .name = type, \ - .create = geniv_template_create, \ - .module = THIS_MODULE, }, - -static struct crypto_template geniv_tmpl[] = { - DECLARE_CRYPTO_TEMPLATE("plain") - DECLARE_CRYPTO_TEMPLATE("plain64") - DECLARE_CRYPTO_TEMPLATE("essiv") - DECLARE_CRYPTO_TEMPLATE("benbi") - DECLARE_CRYPTO_TEMPLATE("null") - DECLARE_CRYPTO_TEMPLATE("lmk") - DECLARE_CRYPTO_TEMPLATE("tcw") - DECLARE_CRYPTO_TEMPLATE("random") +static struct crypt_iv_operations crypt_iv_random_ops = { + .generator = crypt_iv_random_gen };
/* @@ -3435,8 +1925,27 @@ static unsigned crypt_authenckey_size(struct crypt_config *cc) return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param)); }
+/* + * If AEAD is composed like authenc(hmac(sha256),xts(aes)), + * the key must be for some reason in special format. + * This function converts cc->key to this special format. + */ static void crypt_copy_authenckey(char *p, const void *key, - unsigned enckeylen, unsigned authkeylen); + unsigned enckeylen, unsigned authkeylen) +{ + struct crypto_authenc_key_param *param; + struct rtattr *rta; + + rta = (struct rtattr *)p; + param = RTA_DATA(rta); + param->enckeylen = cpu_to_be32(enckeylen); + rta->rta_len = RTA_LENGTH(sizeof(*param)); + rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; + p += RTA_SPACE(sizeof(*param)); + memcpy(p, key + enckeylen, authkeylen); + p += authkeylen; + memcpy(p, key, enckeylen); +}
static int crypt_setkey(struct crypt_config *cc) { diff --git a/include/crypto/geniv.h b/include/crypto/geniv.h deleted file mode 100644 index 6fcf93b..00000000 --- a/include/crypto/geniv.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * geniv.h: common interface for IV generation algorithms - * - * Copyright (C) 2018, Linaro - * - * This file define the data structure the user should pass to the template. - */ - -#ifndef _CRYPTO_GENIV_H -#define _CRYPTO_GENIV_H - -#include <linux/types.h> - -enum setkey_op { - SETKEY_OP_INIT, - SETKEY_OP_SET, - SETKEY_OP_WIPE, -}; - -struct geniv_key_info { - enum setkey_op keyop; - unsigned int tfms_count; - u8 *key; - char *ivopts; - sector_t iv_offset; - unsigned long cipher_flags; - - unsigned short int sector_size; - unsigned int key_size; - unsigned int key_parts; - unsigned int key_mac_size; - unsigned int on_disk_tag_size; -}; - -struct geniv_req_info { - sector_t cc_sector; - unsigned int nents; - u8 *integrity_metadata; -}; - -#endif
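Editor's note: the crypt_copy_authenckey() body restored above packs cc->key into the layout the authenc() template expects: an rtattr header carrying the big-endian encryption-key length, followed by the authentication key and then the encryption key. The userspace sketch below only illustrates that byte layout; the struct and helper names (fake_rtattr, pack_authenc_key) are made up for the example, and the kernel's RTA_* alignment handling is deliberately simplified.

/* Illustrative sketch of the authenc key blob layout; NOT kernel code.
 * Assumed layout (mirroring the comment above):
 *   [rtattr header][be32 enckeylen][auth key][enc key]
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() for the big-endian length */

struct fake_rtattr {		/* stand-in for struct rtattr */
	uint16_t rta_len;
	uint16_t rta_type;
};

static size_t pack_authenc_key(uint8_t *p, const uint8_t *key,
			       uint32_t enckeylen, uint32_t authkeylen)
{
	struct fake_rtattr rta;
	uint32_t be_enckeylen = htonl(enckeylen);
	uint8_t *start = p;

	rta.rta_len = sizeof(rta) + sizeof(be_enckeylen);
	rta.rta_type = 1;	/* stands in for CRYPTO_AUTHENC_KEYA_PARAM */
	memcpy(p, &rta, sizeof(rta));
	p += sizeof(rta);
	memcpy(p, &be_enckeylen, sizeof(be_enckeylen));
	p += sizeof(be_enckeylen);

	/* authentication key is stored first, then the encryption key */
	memcpy(p, key + enckeylen, authkeylen);
	p += authkeylen;
	memcpy(p, key, enckeylen);
	p += enckeylen;

	return p - start;
}

int main(void)
{
	uint8_t key[48] = { 0 };	/* example: 32-byte enc key + 16-byte auth key */
	uint8_t blob[64];
	size_t n = pack_authenc_key(blob, key, 32, 16);

	printf("packed %zu bytes\n", n);
	return 0;
}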
From: Milan Broz gmazyland@gmail.com
mainline inclusion from mainline-5.6-rc1 commit 4ea9471fbd1addb25a4d269991dc724e200ca5b5 category: bugfix bugzilla: 31797 CVE: NA
-----------------------------------------
If the benbi IV is used in an AEAD construction, for example: cryptsetup luksFormat <device> --cipher twofish-xts-benbi --key-size 512 --integrity=hmac-sha256 the constructor uses the wrong skcipher function and crashes:
BUG: kernel NULL pointer dereference, address: 00000014 ... EIP: crypt_iv_benbi_ctr+0x15/0x70 [dm_crypt] Call Trace: ? crypt_subkey_size+0x20/0x20 [dm_crypt] crypt_ctr+0x567/0xfc0 [dm_crypt] dm_table_add_target+0x15f/0x340 [dm_mod]
Fix this by properly using crypt_aead_blocksize() in this case.
Fixes: ef43aa38063a6 ("dm crypt: add cryptographic data integrity protection (authenticated encryption)") Cc: stable@vger.kernel.org # v4.12+ Link: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=941051 Reported-by: Jerad Simpson jbsimpson@gmail.com Signed-off-by: Milan Broz gmazyland@gmail.com Signed-off-by: Mike Snitzer snitzer@redhat.com Signed-off-by: Xiongfeng Wang wangxiongfeng2@huawei.com Reviewed-by: ZhangXiaoxu zhangxiaoxu5@huawei.com Reviewed-by: Hou Tao houtao1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/md/dm-crypt.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index e225246..3a0aa8f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -482,8 +482,14 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - unsigned bs = crypto_skcipher_blocksize(any_tfm(cc)); - int log = ilog2(bs); + unsigned bs; + int log; + + if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) + bs = crypto_aead_blocksize(any_tfm_aead(cc)); + else + bs = crypto_skcipher_blocksize(any_tfm(cc)); + log = ilog2(bs);
/* we need to calculate how far we must shift the sector count * to get the cipher block count, we use this shift in _gen */
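For context, benbi numbers cipher blocks across the device, so the constructor only needs to know how many blocks fit in a 512-byte sector; that is the shift the comment above refers to, and it is why the block size must be taken from the AEAD transform when integrity is enabled. A standalone sketch of that calculation follows (ilog2_u32() and benbi_shift() are illustrative stand-ins, not dm-crypt code):

/* Sketch of the benbi shift calculation described above.
 * Assumptions: 512-byte sectors (SECTOR_SHIFT == 9) and power-of-two
 * cipher block sizes.
 */
#include <stdio.h>

#define SECTOR_SHIFT 9

static int ilog2_u32(unsigned int v)	/* stand-in for the kernel's ilog2() */
{
	int log = -1;

	while (v) {
		v >>= 1;
		log++;
	}
	return log;
}

/* How far the sector count must be shifted to become a cipher-block count. */
static int benbi_shift(unsigned int block_size)
{
	return SECTOR_SHIFT - ilog2_u32(block_size);
}

int main(void)
{
	/* e.g. 16-byte AES/Twofish blocks: 512 / 16 = 32 = 1 << 5 blocks per sector */
	printf("shift for 16-byte blocks: %d\n", benbi_shift(16));
	printf("shift for 64-byte blocks: %d\n", benbi_shift(64));
	return 0;
}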
From: yu kuai yukuai3@huawei.com
hulk inclusion category: bugfix bugzilla: 30213 CVE: CVE-2019-19770
---------------------------
If 'kobject_name(q->kobj.parent)' is too long, 'sprintf' will overflow the fixed-size on-stack 'name' buffer.
Fix the problem by removing the device name from the name of the new dir and replacing 'sprintf' with 'snprintf'.
Fixes: commit 0ebb1affa76c ("block: rename 'q->debugfs_dir' and 'q->blk_trace->dir' in blk_unregister_queue()") Signed-off-by: yu kuai yukuai3@huawei.com Reviewed-by: Hou Tao houtao1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- block/blk-sysfs.c | 44 +++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index d249d7f..ce84526 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -12,6 +12,7 @@ #include <linux/blk-mq.h> #include <linux/blk-cgroup.h> #include <linux/debugfs.h> +#include <linux/atomic.h>
#include "blk.h" #include "blk-mq.h" @@ -960,6 +961,25 @@ int blk_register_queue(struct gendisk *disk) EXPORT_SYMBOL_GPL(blk_register_queue);
#ifdef CONFIG_DEBUG_FS +void blk_rename_debugfs_dir(struct dentry **old) +{ + static atomic_t i = ATOMIC_INIT(0); + struct dentry *new; + char name[DNAME_INLINE_LEN]; + u32 index = atomic_fetch_inc(&i); + + snprintf(name, sizeof(name), "ready_to_remove_%u", index); + new = debugfs_lookup(name, blk_debugfs_root); + if (WARN_ON(new)) { + dput(new); + return; + } + new = debugfs_rename(blk_debugfs_root, *old, blk_debugfs_root, name); + if (WARN_ON(!new)) + return; + *old = new; +} + /* * blk_prepare_release_queue - rename q->debugfs_dir and q->blk_trace->dir * @q: request_queue of which the dir to be renamed belong to. @@ -971,31 +991,17 @@ int blk_register_queue(struct gendisk *disk) */ static void blk_prepare_release_queue(struct request_queue *q) { - struct dentry *new = NULL; - struct dentry **old = NULL; - char name[DNAME_INLINE_LEN]; - int i = 0; - #ifdef CONFIG_BLK_DEBUG_FS if (!IS_ERR_OR_NULL(q->debugfs_dir)) - old = &q->debugfs_dir; + blk_rename_debugfs_dir(&q->debugfs_dir); + #endif #ifdef CONFIG_BLK_DEV_IO_TRACE - /* q->debugfs_dir and q->blk_trace->dir can't both exist */ + mutex_lock(&q->blk_trace_mutex); if (q->blk_trace && !IS_ERR_OR_NULL(q->blk_trace->dir)) - old = &q->blk_trace->dir; + blk_rename_debugfs_dir(&q->blk_trace->dir); + mutex_unlock(&q->blk_trace_mutex); #endif - if (old == NULL) - return; -#define MAX_ATTEMPT 1024 - while (new == NULL && i < MAX_ATTEMPT) { - sprintf(name, "ready_to_remove_%s_%d", - kobject_name(q->kobj.parent), i++); - new = debugfs_rename(blk_debugfs_root, *old, - blk_debugfs_root, name); - } - if (new) - *old = new; } #else #define blk_prepare_release_queue(q) do { } while (0)