Add testing dm-integrity target. This target registers special "DM-DIF-EXT-TAG" DIF integrity profile. For now, it allocates space for integrity tag in memory. The whole target behaves like a linear target, except it adds additional metadata/integrity tags to any bio it receives. (Bio integrity pages are owned by layer above.) Underlying device does not see these integrity metadata, bio clones are created and integrity data are temporarily stripped from bio for underlying device. This target is intended to be used stacked with modified dm-crypt that (according to mapping table) adds and processes integrity metadata in bio operation. IOW stack looks like this: sdb physical disk (no DIF extensions) `-x dm-integrity device (stores DIF/metadata in memory) `-y dm-crypt device (allocates bio integrity data, verifies it) Signed-off-by: Milan Broz --- drivers/md/Kconfig | 7 drivers/md/Makefile | 1 drivers/md/dm-integrity.c | 738 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 746 insertions(+) Index: linux-4.7-rc3-devel/drivers/md/dm-integrity.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ linux-4.7-rc3-devel/drivers/md/dm-integrity.c 2016-06-14 22:32:03.000000000 +0200 @@ -0,0 +1,738 @@ +/* + * Copyright (C) 2016 Milan Broz + * + * This file is released under the GPL. 
+ */ + +#include +//#include +#include +#include +#include "dm-bufio.h" + +#define DM_MSG_PREFIX "integrity" + +#define DEFAULT_INTERLEAVE_SECTORS 32768 +#define DEFAULT_BUFFER_SECTORS 128 +#define DEFAULT_JOURNAL_SIZE_FACTOR 7 +#define DEFAULT_MAX_JOURNAL_SECTORS 32768 +#define MIN_INTERLEAVE_SECTORS 3 +#define MAX_INTERLEAVE_SECTORS 31 + +#define SB_MAGIC "integrt" +#define SB_VERSION 1 + +struct superblock { + __u8 magic[8]; + __u8 version; + __s8 log2_interleave_sectors; + __u16 integrity_tag_size; + __u32 journal_sections; + __u64 provided_data_sectors; +}; + +#define INTEGRITY_TAG_SIZE_ROUNDUP 8 + +struct journal_entry { + __u64 sector; + __u64 last_8_bytes; + __u8 tag[0]; +}; + +#define JOURNAL_SECTOR_SPACE 504 + +struct journal_sector { + __u8 entries[JOURNAL_SECTOR_SPACE]; + __u64 commit_id; +}; + +#define MAX_TAG_SIZE (JOURNAL_SECTOR_SPACE - sizeof(struct journal_entry)) + + +/* + * Integrity: maps a integrity tags in memory of a block device. + */ +struct dm_integrity_c { + struct dm_dev *dev; + unsigned tag_size; + __s8 log2_tag_size; + struct dm_io_client *io; + struct dm_bufio_client *bufio; + struct workqueue_struct *wq; + struct superblock *sb; + + unsigned short journal_entry_size; + unsigned char journal_entries_per_sector; + unsigned char journal_section_sectors; + sector_t device_sectors; + sector_t initial_sectors; + unsigned metadata_run; + __s8 log2_metadata_run; + sector_t last_area; + sector_t metadata_sector_limit; + sector_t last_sector; + __u8 log2_buffer_sectors; +}; + +struct dm_integrity_io { + struct work_struct work; + + struct dm_integrity_c *ic; + sector_t logical_sector; + unsigned n_sectors; + bool write; + + sector_t area; + sector_t offset; + + sector_t metadata_block; + unsigned metadata_offset; + + sector_t data_sector; + + atomic_t in_flight; + + bio_end_io_t *orig_bi_end_io; + struct bio_integrity_payload *orig_bi_integrity; + + int bi_error; +}; + +/* + * DM Integrity profile, protection is performed layer above 
(dm-crypt) + */ +static struct blk_integrity_profile dm_integrity_profile = { + .name = "DM-DIF-EXT-TAG", + .generate_fn = NULL, + .verify_fn = NULL, +}; + +static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, sector_t *area, sector_t *offset) +{ + __s8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; + if (log2_interleave_sectors >= 0) { + *area = data_sector >> log2_interleave_sectors; + *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1); + } else { + *area = 0; + *offset = data_sector; + } +} + +static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, sector_t offset, unsigned log2_block_sectors, unsigned *metadata_offset) +{ + __u64 ms; + unsigned mo; + if (ic->sb->log2_interleave_sectors >= 0) { + ms = area << ic->sb->log2_interleave_sectors; + if (likely(ic->log2_metadata_run >= 0)) + ms += area << ic->log2_metadata_run; + else + ms += area * ic->metadata_run; + ms >>= log2_block_sectors; + } else { + ms = 0; + } + if (likely(ic->log2_tag_size >= 0)) { + ms += offset >> (SECTOR_SHIFT + log2_block_sectors - ic->log2_tag_size); + mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << log2_block_sectors) - 1); + } else { + ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + log2_block_sectors); + mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << log2_block_sectors) - 1); + } + *metadata_offset = mo; + return ms; +} + +static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) +{ + sector_t result; + if (area == ic->last_area) { + result = ic->metadata_sector_limit; + } else { + result = area << ic->sb->log2_interleave_sectors; + if (likely(ic->log2_metadata_run >= 0)) + result += (area + 1) << ic->log2_metadata_run; + else + result += (area + 1) * ic->metadata_run; + } + result += ic->initial_sectors + offset; + return result; +} + +static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t 
*metadata_block, unsigned *metadata_offset, unsigned total_size, bool write) +{ + do { + unsigned char *data; + struct dm_buffer *b; + unsigned to_copy; + + data = dm_bufio_read(ic->bufio, *metadata_block, &b); + if (unlikely(IS_ERR(data))) + return PTR_ERR(data); + + to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); + if (likely(!write)) { + memcpy(tag, data + *metadata_offset, to_copy); + } else { + memcpy(data + *metadata_offset, tag, to_copy); + dm_bufio_mark_buffer_dirty(b); + } + dm_bufio_release(b); + + tag += to_copy; + *metadata_offset += to_copy; + if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { + (*metadata_block)++; + *metadata_offset = 0; + } + total_size -= to_copy; + } while (unlikely(total_size)); + + return 0; +} + +static void dec_in_flight(struct dm_integrity_io *dio) +{ + if (atomic_dec_and_test(&dio->in_flight)) { + struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); + if (dio->orig_bi_integrity) { + bio->bi_integrity = dio->orig_bi_integrity; + bio->bi_rw |= REQ_INTEGRITY; + } + bio->bi_end_io = dio->orig_bi_end_io; + if (unlikely(dio->bi_error) && !bio->bi_error) + bio->bi_error = dio->bi_error; + bio_endio(bio); + } +} + +static void integrity_end_io(struct bio *bio) +{ + struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); + dec_in_flight(dio); +} + +static void integrity_work(struct work_struct *w) +{ + struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); + struct dm_integrity_c *ic = dio->ic; + + int r; + struct bio_integrity_payload *bip; + + bip = dio->orig_bi_integrity; + if (bip) { + struct bio_vec iv; + struct bvec_iter iter; + unsigned data_processed = 0; + + bip_for_each_vec(iv, bip, iter) { + unsigned char *tag; + int r; + + BUG_ON(PageHighMem(iv.bv_page)); + tag = page_address(iv.bv_page) + iv.bv_offset; + r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, 
&dio->metadata_offset, iv.bv_len, dio->write); + if (unlikely(r)) + goto error; + data_processed += iv.bv_len; + } + + if (unlikely(data_processed != dio->n_sectors * ic->tag_size)) { + DMERR("Bad size of integrity data: %u != %u * %u", data_processed, dio->n_sectors, ic->tag_size); + r = -EINVAL; + goto error; + } + } + dec_in_flight(dio); + return; + +error: + dio->bi_error = r; + dec_in_flight(dio); +} + +static int dm_integrity_map(struct dm_target *ti, struct bio *bio) +{ + struct dm_integrity_c *ic = ti->private; + struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); + + dio->ic = ic; + dio->bi_error = 0; + bio->bi_bdev = ic->dev->bdev; + + if (unlikely(bio->bi_rw & REQ_FLUSH)) + return DM_MAPIO_REMAPPED; + + dio->logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); + dio->n_sectors = bio_sectors(bio); + dio->write = bio_rw(bio) == WRITE; + if (unlikely(dio->logical_sector + dio->n_sectors > le64_to_cpu(ic->sb->provided_data_sectors))) { + DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx", (unsigned long long)dio->logical_sector, dio->n_sectors, (unsigned long long)le64_to_cpu(ic->sb->provided_data_sectors)); + return -EIO; + } + + get_area_and_offset(ic, dio->logical_sector, &dio->area, &dio->offset); + dio->metadata_block = get_metadata_sector_and_offset(ic, dio->area, dio->offset, ic->log2_buffer_sectors, &dio->metadata_offset); + dio->data_sector = get_data_sector(ic, dio->area, dio->offset); + + bio->bi_iter.bi_sector = dio->data_sector; + + if (unlikely(bio->bi_rw & REQ_DISCARD)) + return DM_MAPIO_REMAPPED; + + atomic_set(&dio->in_flight, 2); + + dio->orig_bi_integrity = bio_integrity(bio); + bio->bi_integrity = NULL; + bio->bi_rw &= ~REQ_INTEGRITY; + dio->orig_bi_end_io = bio->bi_end_io; + bio->bi_end_io = integrity_end_io; + generic_make_request(bio); + + INIT_WORK(&dio->work, integrity_work); + queue_work(ic->wq, &dio->work); + + return DM_MAPIO_SUBMITTED; +} + +static void dm_integrity_status(struct 
dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) +{ + struct dm_integrity_c *ic = (struct dm_integrity_c *) ti->private; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + snprintf(result, maxlen, "%s %u", ic->dev->name, ic->tag_size); + break; + } +} + +static int dm_integrity_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, fmode_t *mode) +{ + struct dm_integrity_c *ic = (struct dm_integrity_c *) ti->private; + struct dm_dev *dev = ic->dev; + + *bdev = dev->bdev; + + return 0; +} + +static int dm_integrity_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct dm_integrity_c *ic = ti->private; + + return fn(ti, ic->dev, 0, ti->len, data); +} + +static void calculate_journal_section_size(struct dm_integrity_c *ic) +{ + unsigned tag_size_padded; + tag_size_padded = roundup(ic->tag_size, INTEGRITY_TAG_SIZE_ROUNDUP); + ic->journal_entry_size = sizeof(struct journal_entry) + tag_size_padded; + ic->journal_entries_per_sector = JOURNAL_SECTOR_SPACE / ic->journal_entry_size; + ic->journal_section_sectors = (1 + ic->journal_entries_per_sector) * 8; +} + +static int calculate_device_limits(struct dm_integrity_c *ic) +{ + __u64 initial_sectors; + sector_t last_offset; + __u64 metadata_sector; + unsigned metadata_offset; + + calculate_journal_section_size(ic); + initial_sectors = 8 + (__u64)ic->journal_section_sectors * le32_to_cpu(ic->sb->journal_sections); + if (initial_sectors + 16 > ic->device_sectors) { + return -EINVAL; + } + ic->initial_sectors = initial_sectors; + + if (ic->sb->log2_interleave_sectors >= 0) { + ic->metadata_run = roundup((__u64)ic->tag_size << ic->sb->log2_interleave_sectors, (__u64)4096) >> SECTOR_SHIFT; + if (!(ic->metadata_run & (ic->metadata_run - 1))) + ic->log2_metadata_run = __ffs(ic->metadata_run); + else + ic->log2_metadata_run = -1; + } else { + ic->metadata_run = 0; + ic->log2_metadata_run = 
-1; + } + get_area_and_offset(ic, le64_to_cpu(ic->sb->provided_data_sectors), &ic->last_area, &last_offset); + metadata_sector = get_metadata_sector_and_offset(ic, ic->last_area, last_offset, 0, &metadata_offset); + if (metadata_offset) + metadata_sector++; + metadata_sector = roundup(metadata_sector, 8); + ic->metadata_sector_limit = metadata_sector; + ic->last_sector = initial_sectors + metadata_sector + last_offset; + /* check for integer overflow */ + if (ic->last_sector != initial_sectors + metadata_sector + last_offset) { + return -EINVAL; + } + if (ic->last_sector > ic->device_sectors) { + return -EINVAL; + } + return 0; +} + +static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors) +{ + unsigned journal_sections; + int test_bit; + + memcpy(ic->sb->magic, SB_MAGIC, 8); + ic->sb->version = SB_VERSION; + ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size); + + calculate_journal_section_size(ic); + journal_sections = journal_sectors / ic->journal_section_sectors; + if (!journal_sections) + journal_sections = 1; + ic->sb->journal_sections = cpu_to_le32(journal_sections); + + if (!interleave_sectors) + ic->sb->log2_interleave_sectors = -1; + else { + ic->sb->log2_interleave_sectors = __fls(interleave_sectors); + ic->sb->log2_interleave_sectors = max((s8)MIN_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); + ic->sb->log2_interleave_sectors = min((s8)MAX_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); + } + + ic->sb->provided_data_sectors = cpu_to_le64(0); + for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) { + __u64 prev_data_sectors = ic->sb->provided_data_sectors; + ic->sb->provided_data_sectors |= cpu_to_le64((sector_t)1 << test_bit); + if (calculate_device_limits(ic)) + ic->sb->provided_data_sectors = prev_data_sectors; + } + + if (!le64_to_cpu(ic->sb->provided_data_sectors)) + return -EINVAL; + + return 0; +} + +static void dm_integrity_set(struct dm_target *ti, 
struct dm_integrity_c *ic) +{ + struct gendisk *disk = dm_disk(dm_table_get_md(ti->table)); + struct blk_integrity bi; + + bi.profile = &dm_integrity_profile; + bi.tuple_size = ic->tag_size * (queue_logical_block_size(disk->queue) >> SECTOR_SHIFT); + bi.tag_size = ic->tag_size; + + blk_integrity_register(disk, &bi); + blk_queue_max_integrity_segments(disk->queue, UINT_MAX); +} + +static void dm_integrity_dtr(struct dm_target *ti); + +/* + * Construct a integrity mapping: + * + * Arguments: + * device + * tag size + * optional arguments + * journal-sectors + * interleave-sectors + * buffer-sectors + */ +static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) +{ + struct dm_integrity_c *ic; + char dummy; + int r, i; + unsigned extra_args; + struct dm_arg_set as; + static struct dm_arg _args[] = { + {0, 3, "Invalid number of feature args"}, + }; + unsigned journal_sectors, interleave_sectors, buffer_sectors; + struct dm_io_request io_req; + struct dm_io_region io_loc; + bool should_write_sb; + + if (argc < 3) { + ti->error = "Invalid argument count"; + return -EINVAL; + } + + ic = kzalloc(sizeof(*ic), GFP_KERNEL); + if (!ic) { + ti->error = "Cannot allocate integrity context"; + return -ENOMEM; + } + ti->private = ic; + ti->per_io_data_size = sizeof(struct dm_integrity_io); + + ic->io = dm_io_client_create(); + if (IS_ERR(ic->io)) { + r = PTR_ERR(ic->io); + ic->io = NULL; + ti->error = "Cannot allocate dm io"; + goto bad; + } + + ic->wq = alloc_workqueue("dm-integrity", WQ_MEM_RECLAIM, 0); + if (!ic->wq) { + ti->error = "Cannot allocate workqueue"; + r = -ENOMEM; + goto bad; + } + + r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); + if (r) { + ti->error = "Device lookup failed"; + goto bad; + } + + if (sscanf(argv[1], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size || ic->tag_size > MAX_TAG_SIZE) { + ti->error = "Invalid tag size"; + r = -EINVAL; + goto bad; + } + if (!(ic->tag_size & (ic->tag_size - 1))) + 
ic->log2_tag_size = __ffs(ic->tag_size); + else + ic->log2_tag_size = -1; + + ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT; + journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS, + ic->device_sectors >> + DEFAULT_JOURNAL_SIZE_FACTOR); + interleave_sectors = DEFAULT_INTERLEAVE_SECTORS; + buffer_sectors = DEFAULT_BUFFER_SECTORS; + + as.argc = argc - 2; + as.argv = argv + 2; + r = dm_read_arg_group(_args, &as, &extra_args, &ti->error); + if (r) + goto bad; + + while (extra_args--) { + const char *opt_string; + unsigned val; + opt_string = dm_shift_arg(&as); + if (!opt_string) { + r = -EINVAL; + ti->error = "Not enough feature arguments"; + goto bad; + } + if (sscanf(opt_string, "journal-sectors:%u%c", &val, &dummy) == 1) + journal_sectors = val; + else if (sscanf(opt_string, "interleave-sectors:%u%c", &val, &dummy) == 1) + interleave_sectors = val; + else if (sscanf(opt_string, "buffer-sectors:%u%c", &val, &dummy) == 1) + buffer_sectors = val; + else { + r = -EINVAL; + ti->error = "Invalid argument"; + goto bad; + } + } + + ic->sb = alloc_pages_exact(4096, GFP_KERNEL); + if (!ic->sb) { + r = -ENOMEM; + ti->error = "Cannot allocate superblock area"; + goto bad; + } + + io_req.bi_rw = READ; + io_req.mem.type = DM_IO_KMEM; + io_req.mem.ptr.addr = ic->sb; + io_req.notify.fn = NULL; + io_req.client = ic->io; + io_loc.bdev = ic->dev->bdev; + io_loc.sector = 0; + io_loc.count = 8; + r = dm_io(&io_req, 1, &io_loc, NULL); + if (r) { + ti->error = "Error reading superblock"; + goto bad; + } + if (!memcmp(ic->sb->magic, SB_MAGIC, 8)) { + should_write_sb = false; + goto have_sb; + } + for (i = 0; i < 512; i += 8) { + if (*(__u64 *)((__u8 *)ic->sb + i)) { + r = -EINVAL; + ti->error = "The device is not initialized"; + goto bad; + } + } + + r = initialize_superblock(ic, journal_sectors, interleave_sectors); + if (r) { + ti->error = "Could not initialize superblock"; + goto bad; + } + should_write_sb = true; + +have_sb: + if (ic->sb->version != 
SB_VERSION) { + r = -EINVAL; + ti->error = "Unknown version"; + goto bad; + } + if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) { + r = -EINVAL; + ti->error = "Invalid tag size"; + goto bad; + } + /* make sure that ti->max_io_len doesn't overflow */ + if (ic->sb->log2_interleave_sectors != -1 && (ic->sb->log2_interleave_sectors < MIN_INTERLEAVE_SECTORS || ic->sb->log2_interleave_sectors > MAX_INTERLEAVE_SECTORS)) { + r = -EINVAL; + ti->error = "Invalid interleave_sectors in the superblock"; + goto bad; + } + r = calculate_device_limits(ic); + if (r) { + ti->error = "The device is too small"; + goto bad; + } + + if (!buffer_sectors) + buffer_sectors = 1; + ic->log2_buffer_sectors = min3(__fls(buffer_sectors), __ffs(ic->metadata_run), 31UL - SECTOR_SHIFT); + + printk("initialized:\n"); + printk(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size)); + printk(" journal_entry_size %u\n", ic->journal_entry_size); + printk(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector); + printk(" journal_section_sectors %u\n", ic->journal_section_sectors); + printk(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections)); + printk(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); + printk(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors); + printk(" initial_sectors 0x%llx\n", (unsigned long long)ic->initial_sectors); + printk(" metadata_run 0x%x\n", ic->metadata_run); + printk(" log2_metadata_run %d\n", ic->log2_metadata_run); + printk(" last_area 0x%llx\n", (unsigned long long)ic->last_area); + printk(" metadata_sector_limit 0x%llx\n", (unsigned long long)ic->metadata_sector_limit); + printk(" last_sector 0x%llx\n", (unsigned long long)ic->last_sector); + printk(" provided_data_sectors 0x%llx\n", (unsigned long long)le64_to_cpu(ic->sb->provided_data_sectors)); + printk(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); + +#if 0 +#define PRINT_SECTOR(s) \ + { \ + sector_t area, 
offset, data; \ + __u64 ms; \ + unsigned mo; \ + get_area_and_offset(ic, s, &area, &offset); \ + ms = get_metadata_sector_and_offset(ic, area, offset, ic->log2_buffer_sectors, &mo);\ + ms += ic->initial_sectors; \ + data = get_data_sector(ic, area, offset); \ + printk("logical sector 0x%llx, metadata sector 0x%llx offset 0x%x, data 0x%llx\n", (unsigned long long)(s), (unsigned long long)ms, mo, (unsigned long long)data);\ + } + PRINT_SECTOR(0); + PRINT_SECTOR(0x7fff); + PRINT_SECTOR(0x8000); + if (ic->sb->log2_interleave_sectors >= 0) + PRINT_SECTOR(ic->last_area << ic->sb->log2_interleave_sectors); + PRINT_SECTOR(le64_to_cpu(ic->sb->provided_data_sectors) - 1); +#endif + + if (should_write_sb) { + io_req.bi_rw = WRITE; + io_req.mem.type = DM_IO_KMEM; + io_req.mem.ptr.addr = ic->sb; + io_req.notify.fn = NULL; + io_req.client = ic->io; + io_loc.bdev = ic->dev->bdev; + io_loc.sector = 0; + io_loc.count = 8; + r = dm_io(&io_req, 1, &io_loc, NULL); + if (r) { + ti->error = "Error writing superblock"; + goto bad; + } + } + + if (ic->sb->log2_interleave_sectors >= 0) { + r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors); + if (r) + goto bad; + ti->split_discard_bios = true; + } + + ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL); + if (IS_ERR(ic->bufio)) { + r = PTR_ERR(ic->bufio); + ti->error = "Cannot initialize dm-bufio"; + ic->bufio = NULL; + goto bad; + } + dm_bufio_set_sector_range(ic->bufio, ic->initial_sectors, ic->metadata_sector_limit); + + dm_integrity_set(ti, ic); + + ti->num_flush_bios = 1; + ti->num_discard_bios = 1; + + return 0; + +bad: + dm_integrity_dtr(ti); + return r; +} + +static void dm_integrity_dtr(struct dm_target *ti) +{ + struct dm_integrity_c *ic = ti->private; + + if (ic->bufio) + dm_bufio_client_destroy(ic->bufio); + if (ic->io) + dm_io_client_destroy(ic->io); + if (ic->wq) + destroy_workqueue(ic->wq); + if (ic->dev) + dm_put_device(ti, ic->dev); + if 
(ic->sb) + free_pages_exact(ic->sb, 4096); + kfree(ic); +} + +static struct target_type integrity_target = { + .name = "integrity", + .version = {0, 0, 1}, + .module = THIS_MODULE, + .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, + .ctr = dm_integrity_ctr, + .dtr = dm_integrity_dtr, + .map = dm_integrity_map, + .status = dm_integrity_status, + .prepare_ioctl = dm_integrity_prepare_ioctl, + .iterate_devices = dm_integrity_iterate_devices, +}; + +int __init dm_integrity_init(void) +{ + int r = dm_register_target(&integrity_target); + + if (r < 0) + DMERR("register failed %d", r); + + return r; +} + +void dm_integrity_exit(void) +{ + dm_unregister_target(&integrity_target); +} + +module_init(dm_integrity_init); +module_exit(dm_integrity_exit); + +MODULE_AUTHOR("Milan Broz"); +MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension"); +MODULE_LICENSE("GPL"); Index: linux-4.7-rc3-devel/drivers/md/Makefile =================================================================== --- linux-4.7-rc3-devel.orig/drivers/md/Makefile 2016-06-14 22:25:28.000000000 +0200 +++ linux-4.7-rc3-devel/drivers/md/Makefile 2016-06-14 22:27:47.000000000 +0200 @@ -62,6 +62,7 @@ obj-$(CONFIG_DM_ERA) += dm-era.o obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o obj-$(CONFIG_DM_ZEROED) += dm-zeroed.o obj-$(CONFIG_DM_WRITECACHE) += dm-writecache.o +obj-$(CONFIG_DM_INTEGRITY) += dm-integrity.o ifeq ($(CONFIG_DM_UEVENT),y) dm-mod-objs += dm-uevent.o Index: linux-4.7-rc3-devel/drivers/md/Kconfig =================================================================== --- linux-4.7-rc3-devel.orig/drivers/md/Kconfig 2016-06-14 22:25:27.000000000 +0200 +++ linux-4.7-rc3-devel/drivers/md/Kconfig 2016-06-14 22:27:47.000000000 +0200 @@ -524,4 +524,11 @@ config DM_WRITECACHE ---help--- This is the writecache target. +config DM_INTEGRITY + tristate "Integrity target" + depends on BLK_DEV_DM + select BLK_DEV_INTEGRITY + ---help--- + This is the integrity target. + endif # MD