Add integrity data processing to dm-crypt.

This patch adds an optional integrity parameter to the mapping table. If it
is specified and the underlying device supports the "DM-DIF-EXT-TAG"
integrity profile (implemented through the dm-integrity target), bio
integrity metadata fields are allocated for every bio.

Example of a mapping table with 32 bytes of integrity data per sector:

  dmsetup create x --table "0 $SIZE integrity 0 32"
  dmsetup create y --table "0 $SIZE crypt aes-cbc-essiv:sha256 11ff33c6fb942655efb3e30cf4c0fd95f5ef483afca72166c530ae26151dd83b 0 /dev/mapper/x 0 1 integrity:sha256"

For now, only a simple hash is implemented (ONLY FOR TESTING!). The real
implementation will use an HMAC with a per-sector salt, and also an
authenticated mode (like GCM).

The integrity check is Encrypt-then-MAC. For writes, the integrity tags are
calculated after encryption, stored in the bio integrity fields and sent to
the underlying dm-integrity target for storage. For reads, the integrity
metadata are verified before decryption (the tags are filled in by
dm-integrity, but the integrity fields are preallocated in dm-crypt).

Signed-off-by: Milan Broz
---
 drivers/md/dm-crypt.c |  215 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 214 insertions(+), 1 deletion(-)
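
Note for reviewers (not part of the patch): the commit message mentions that
the real implementation will use an HMAC with a per-sector salt. A minimal
sketch of what that could look like with the kernel shash API follows. The
function name example_hmac_sector() is hypothetical; it assumes the caller
has allocated an "hmac(sha256)" tfm with crypto_alloc_shash() and set the
key with crypto_shash_setkey().

#include <crypto/hash.h>

/* Hypothetical replacement for calculate_integrity_sector() below. */
static int example_hmac_sector(struct crypto_shash *hmac_tfm, const u8 *data,
                               u8 *tag, u64 sector)
{
        SHASH_DESC_ON_STACK(desc, hmac_tfm);
        __le64 salt = cpu_to_le64(sector);
        int r;

        desc->tfm = hmac_tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        r = crypto_shash_init(desc);
        if (r)
                return r;

        /* Mix in the sector number first so a tag is bound to its location. */
        r = crypto_shash_update(desc, (const u8 *)&salt, sizeof(salt));
        if (r)
                return r;

        /* MAC the ciphertext of the whole sector (Encrypt-then-MAC). */
        r = crypto_shash_update(desc, data, 1 << SECTOR_SHIFT);
        if (r)
                return r;

        return crypto_shash_final(desc, tag);
}

Salting with the sector number prevents a valid ciphertext/tag pair from
being replayed at a different sector.
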
Index: linux-4.7-rc3-devel/drivers/md/dm-crypt.c
===================================================================
--- linux-4.7-rc3-devel.orig/drivers/md/dm-crypt.c	2016-06-13 17:22:23.000000000 +0200
+++ linux-4.7-rc3-devel/drivers/md/dm-crypt.c	2016-06-14 22:26:17.000000000 +0200
@@ -29,8 +29,10 @@
 #include <crypto/md5.h>
 #include <crypto/algapi.h>
 #include <crypto/skcipher.h>
+#include
 
 #include <linux/device-mapper.h>
+#include
 
 #define DM_MSG_PREFIX "crypt"
 
@@ -178,6 +180,10 @@ struct crypt_config {
 	unsigned int key_size;
 	unsigned int key_parts;      /* independent parts in key buffer */
 	unsigned int key_extra_size; /* additional keys length */
+
+	unsigned int integrity_tag_size;
+	struct crypto_shash *integrity_hash_tfm;
+
 	u8 key[0];
 };
 
@@ -803,6 +809,183 @@ static struct crypt_iv_operations crypt_
 	.post	   = crypt_iv_tcw_post
 };
 
+/*
+ * Integrity extensions
+ */
+static void dm_crypt_integrity_init(struct crypt_config *cc)
+{
+	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
+
+	if (!bi)
+		return;
+
+	if (strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
+		DMINFO("Disabling integrity tag for unknown profile.");
+		return;
+	}
+
+	cc->integrity_tag_size = bi->tag_size;
+	DMINFO("Enabling integrity tag size %u.", cc->integrity_tag_size);
+}
+
+static int dm_crypt_integrity_bio_set(struct dm_crypt_io *io, struct bio *bio)
+{
+	void *meta = NULL;
+	struct bio_integrity_payload *bip;
+	unsigned int meta_len;
+	int ret;
+
+	if (!bio_sectors(bio))
+		return 0;
+
+	meta_len = io->cc->integrity_tag_size * (bio->bi_iter.bi_size >> SECTOR_SHIFT);
+
+	meta = kmalloc(meta_len, GFP_NOIO);
+	if (!meta)
+		return -ENOMEM;
+
+	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
+	if (IS_ERR(bip)) {
+		kfree(meta);
+		return PTR_ERR(bip);
+	}
+
+	bip->bip_iter.bi_size = meta_len;
+	bip->bip_iter.bi_sector = io->cc->start + io->sector;
+
+	ret = bio_integrity_add_page(bio, virt_to_page(meta),
+				     meta_len, offset_in_page(meta));
+	if (ret != meta_len) {
+		kfree(meta);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti,
+			       const char *opts)
+{
+	struct crypto_shash *tfm;
+
+	if (!opts) {
+		ti->error = "Integrity algorithm missing";
+		return -EINVAL;
+	}
+
+	/* Allocate hash algorithm */
+	tfm = crypto_alloc_shash(opts, 0, 0);
+	if (IS_ERR(tfm)) {
+		ti->error = "Error initializing integrity hash";
+		return PTR_ERR(tfm);
+	}
+
+	if (crypto_shash_digestsize(tfm) > cc->integrity_tag_size) {
+		DMERR("space: %u > %u", crypto_shash_digestsize(tfm), cc->integrity_tag_size);
+		crypto_free_shash(tfm);
+		ti->error = "Not enough space for integrity tag.";
+		return -EINVAL;
+	}
+
+	DMINFO("Initialized integrity hash %s", opts);
+	cc->integrity_hash_tfm = tfm;
+	return 0;
+}
+
+/* Just an example, this is not secure! */
+static void calculate_integrity_sector(struct crypt_config *cc, void *data, void *csum)
+{
+	SHASH_DESC_ON_STACK(desc, cc->integrity_hash_tfm);
+
+	desc->tfm = cc->integrity_hash_tfm;
+	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	memset(csum, 0, cc->integrity_tag_size);
+
+	if (crypto_shash_init(desc)) {
+		DMINFO("Hash init error");
+		return;
+	}
+
+	if (crypto_shash_update(desc, data, 1 << SECTOR_SHIFT)) {
+		DMINFO("Hash update error");
+		return;
+	}
+
+	if (crypto_shash_final(desc, csum)) {
+		DMINFO("Hash final error");
+		return;
+	}
+}
+
+static int crypt_integrity_write(struct crypt_config *cc, struct bio *bio)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+	void *bio_buf = page_address(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	u8 *src;
+	sector_t sector_offset = 0;
+	unsigned int offset;
+
+	bio_for_each_segment(bvec, bio, iter) {
+		src = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
+		for (offset = 0; offset < bvec.bv_len; offset += (1 << SECTOR_SHIFT)) {
+			calculate_integrity_sector(cc, src + offset, bio_buf);
+			//DMINFO("DATA: %8phN", src + offset);
+			//DMINFO("WRITE TAG %llu: %8phN", bip->bip_iter.bi_sector + sector_offset, bio_buf);
+			bio_buf += cc->integrity_tag_size;
+			sector_offset++;
+		}
+		kunmap_atomic(src);
+	}
+
+	BUG_ON(sector_offset != bio_sectors(bio));
+	return 0;
+}
+
+static int crypt_integrity_read(struct crypt_config *cc, struct bio *bio)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+	void *bio_buf;
+	char tmp_buf[cc->integrity_tag_size];
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	u8 *src;
+	int ret = 0;
+	sector_t sector_offset = 0;
+	unsigned int offset;
+
+	if (!bip)
+		return 0;
+
+	bio_buf = page_address(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
+
+	bio_for_each_segment(bvec, bio, iter) {
+		src = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
+		for (offset = 0; offset < bvec.bv_len; offset += (1 << SECTOR_SHIFT)) {
+			calculate_integrity_sector(cc, src + offset, tmp_buf);
+			if (memcmp(bio_buf, tmp_buf, cc->integrity_tag_size)) {
+				DMERR("INTEGRITY ERROR, sector %llu", (unsigned long long)(bip->bip_iter.bi_sector + sector_offset));
+				ret = -EIO;
+			}
+			bio_buf += cc->integrity_tag_size;
+			sector_offset++;
+		}
+		kunmap_atomic(src);
+	}
+
+	BUG_ON(sector_offset != bio_sectors(bio));
+
+	return ret;
+}
+
+static void crypt_integrity_dtr(struct crypt_config *cc)
+{
+	if (cc->integrity_hash_tfm && !IS_ERR(cc->integrity_hash_tfm))
+		crypto_free_shash(cc->integrity_hash_tfm);
+}
+
 static void crypt_convert_init(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct bio *bio_out, struct bio *bio_in,
@@ -1034,6 +1217,13 @@ return_clone:
 	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
 		mutex_unlock(&cc->bio_alloc_lock);
 
+	/*
+	 * Allocate space for integrity tags
+	 * FIXME: handle alloc failure
+	 */
+	if (io->cc->integrity_tag_size)
+		dm_crypt_integrity_bio_set(io, clone);
+
 	return clone;
 }
 
@@ -1116,6 +1306,11 @@ static void crypt_endio(struct bio *clon
 		crypt_free_buffer_pages(cc, clone);
 
 	error = clone->bi_error;
+
+	/* Integrity, Encrypt-then-MAC */
+	if (rw == READ && !error && io->cc->integrity_tag_size)
+		error = crypt_integrity_read(cc, clone);
+
 	bio_put(clone);
 
 	if (rw == READ && !error) {
@@ -1159,6 +1354,9 @@ static int kcryptd_io_read(struct dm_cry
 	clone_init(io, clone);
 	clone->bi_iter.bi_sector = cc->start + io->sector;
 
+	if (io->cc->integrity_tag_size)
+		dm_crypt_integrity_bio_set(io, clone);
+
 	generic_make_request(clone);
 	return 0;
 }
@@ -1265,6 +1463,10 @@ static void kcryptd_crypt_write_io_submi
 
 	clone->bi_iter.bi_sector = cc->start + io->sector;
 
+	/* Encrypt-then-MAC - add integrity data */
+	if (io->cc->integrity_tag_size)
+		crypt_integrity_write(cc, clone);
+
 	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
 		generic_make_request(clone);
 		return;
@@ -1563,6 +1765,8 @@ static void crypt_dtr(struct dm_target *
 	kzfree(cc->cipher);
 	kzfree(cc->cipher_string);
 
+	crypt_integrity_dtr(cc);
+
 	/* Must zero key material before freeing */
 	kzfree(cc);
 }
@@ -1859,7 +2063,13 @@ static int crypt_ctr(struct dm_target *t
 		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
 			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
 
-		else {
+		else if (!strncasecmp(opt_string, "integrity:", 10)) {
+			dm_crypt_integrity_init(cc);
+			ret = crypt_integrity_ctr(cc, ti, &opt_string[10]);
+			if (ret)
+				goto bad;
+
+		} else {
 			ti->error = "Invalid feature arguments";
 			goto bad;
 		}
@@ -1963,6 +2173,7 @@ static void crypt_status(struct dm_targe
 		num_feature_args += !!ti->num_discard_bios;
 		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
 		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+		num_feature_args += (cc->integrity_tag_size ? 1 : 0);
 		if (num_feature_args) {
 			DMEMIT(" %d", num_feature_args);
 			if (ti->num_discard_bios)
@@ -1971,6 +2182,8 @@ static void crypt_status(struct dm_targe
 			DMEMIT(" same_cpu_crypt");
 		if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
 			DMEMIT(" submit_from_crypt_cpus");
+		if (cc->integrity_tag_size)
+			DMEMIT(" integrity:%s", crypto_shash_alg_name(cc->integrity_hash_tfm));
 	}
 
 	break;
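
Note (not part of the patch): the authenticated mode mentioned above (like
GCM) would fold encryption and tag generation into a single AEAD operation
instead of hashing after encryption. A rough sketch with the kernel AEAD
API; the function name example_gcm_encrypt_sector() is hypothetical, and it
assumes the caller has allocated "gcm(aes)" with crypto_alloc_aead(), set
the key with crypto_aead_setkey() and the tag length with
crypto_aead_setauthsize(tfm, 16):

#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>

/* Hypothetical: encrypt one sector in place and emit a 16-byte tag. */
static int example_gcm_encrypt_sector(struct crypto_aead *tfm, u8 *data,
                                      u8 *tag, u64 sector)
{
        struct scatterlist sg[2];
        struct aead_request *req;
        u8 iv[12];
        int r;

        /* IV derived from the sector number (sketch only, not a real IV scheme). */
        memset(iv, 0, sizeof(iv));
        put_unaligned_le64(sector, iv);

        /* src covers the plaintext; dst additionally receives the auth tag. */
        sg_init_table(sg, 2);
        sg_set_buf(&sg[0], data, 1 << SECTOR_SHIFT);
        sg_set_buf(&sg[1], tag, 16);

        req = aead_request_alloc(tfm, GFP_NOIO);
        if (!req)
                return -ENOMEM;

        aead_request_set_crypt(req, sg, sg, 1 << SECTOR_SHIFT, iv);
        aead_request_set_ad(req, 0);

        /* Assumes a synchronous tfm; a real caller must handle -EINPROGRESS. */
        r = crypto_aead_encrypt(req);

        aead_request_free(req);
        return r;
}

On the read side, crypto_aead_decrypt() over the same layout returns
-EBADMSG when the tag does not verify, which would take the place of the
memcmp() in crypt_integrity_read().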