Index: linux-2.6/drivers/md/Kconfig
===================================================================
--- linux-2.6.orig/drivers/md/Kconfig
+++ linux-2.6/drivers/md/Kconfig
@@ -642,4 +642,14 @@ config DM_ZONED
 
 	  If unsure, say N.
 
+config DM_UPDATE
+	tristate "Update target support"
+	depends on BLK_DEV_DM
+	select DM_BUFIO
+	select CRYPTO
+	help
+	  The dm-update target allows transparent updates for embedded devices.
+
+	  If unsure, say N.
+
 endif # MD
Index: linux-2.6/drivers/md/Makefile
===================================================================
--- linux-2.6.orig/drivers/md/Makefile
+++ linux-2.6/drivers/md/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_DM_LOG_WRITES)	+= dm-log-wr
 obj-$(CONFIG_DM_INTEGRITY)	+= dm-integrity.o
 obj-$(CONFIG_DM_ZONED)		+= dm-zoned.o
 obj-$(CONFIG_DM_WRITECACHE)	+= dm-writecache.o
+obj-$(CONFIG_DM_UPDATE)		+= dm-update.o
 
 ifeq ($(CONFIG_DM_INIT),y)
 dm-mod-objs			+= dm-init.o
Index: linux-2.6/drivers/md/dm-update.c
===================================================================
--- /dev/null
+++ linux-2.6/drivers/md/dm-update.c
@@ -0,0 +1,659 @@
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/dm-io.h>
+#include <linux/crypto.h>
+
+#include "dm-update.h"
+
+#define DM_MSG_PREFIX "update"
+
+#define N_BUFFERS 16
+
+#define B_EMPTY		0	/* buffer unused */
+#define B_LOADING	1	/* read + decompress of the chunk in progress */
+#define B_VALID		2	/* decompressed chunk is valid */
+
+struct dm_update_buffer {
+	int status;
+	char *compressed_chunk;
+	char *decompressed_chunk;
+	uint64_t src;
+	struct update_entry *e;
+	struct bio_list waiting_bios;
+	struct work_struct work;
+	struct dm_update *u;
+};
+
+struct dm_update {
+	struct dm_dev *system_dev;
+	struct dm_dev *update_dev;
+	struct dm_target *ti;
+	struct dm_io_client *dm_io;
+	unsigned update_lbs;
+	unsigned char update_lbs_bits;
+	struct update_superblock *sb;
+	struct crypto_comp *cc;
+	struct update_entry *entries;
+
+	struct mutex mutex;
+	struct workqueue_struct *decompress_wq;
+	struct bio_list waiting_bios;
+
+	struct workqueue_struct *bg_wq;
+	struct work_struct bg_work;
+	char *bg_compressed_chunk;
+	char *bg_decompressed_chunk;
+	size_t bg_index;
+
+	unsigned buffer_replacement;
+	struct dm_update_buffer buffer[N_BUFFERS];
+};
+
+static int update_rw(struct dm_update *u, bool system_dev, int req_op, sector_t sector, sector_t n_sectors, void *ptr)
+{
+	struct dm_io_region region;
+	struct dm_io_request req;
+
+	region.bdev = system_dev ? u->system_dev->bdev : u->update_dev->bdev;
+	region.sector = sector;
+	region.count = n_sectors;
+
+	req.bi_op = req_op;
+	req.bi_op_flags = REQ_SYNC;
+	req.mem.type = DM_IO_VMA;
+	req.mem.ptr.vma = ptr;
+	req.client = u->dm_io;
+	req.notify.fn = NULL;
+	req.notify.context = NULL;
+
+	return dm_io(&req, 1, &region, NULL);
+}
+
+static int update_decompress(struct dm_update *u, void *src, size_t src_size, void *dst, size_t dst_size)
+{
+	int r;
+	if (!u->cc) {
+		if (unlikely(src_size > dst_size))
+			return -EOVERFLOW;
+		memcpy(dst, src, src_size);
+	} else {
+		unsigned dst_int;
+		if (unlikely(src_size != (unsigned)src_size) ||
+		    unlikely(dst_size != (unsigned)dst_size))
+			return -EOVERFLOW;
+		dst_int = dst_size;
+		r = crypto_comp_decompress(u->cc, src, src_size, dst, &dst_int);
+		if (unlikely(r))
+			return r;
+	}
+	return 0;
+}
+
+static void update_fill_from_buffer(struct dm_update *u, struct dm_update_buffer *b, struct bio *bio)
+{
+	struct bio_vec bv;
+	struct bvec_iter iter;
+
+	struct update_entry *e = bio->bi_private;
+	size_t data_offset = (size_t)le32_to_cpu(e->offset) << u->sb->block_bits;
+	size_t bio_offset = (bio->bi_iter.bi_sector & ((1 << (u->sb->block_bits - SECTOR_SHIFT)) - 1)) << SECTOR_SHIFT;
+	const char *data = b->decompressed_chunk + data_offset + bio_offset;
+
+	bio_for_each_segment(bv, bio, iter) {
+		char *addr = kmap_local_page(bv.bv_page);
+		memcpy(addr + bv.bv_offset, data, bv.bv_len);
+		flush_dcache_page(bv.bv_page);
+		kunmap_local(addr);
+		data += bv.bv_len;
+	}
+
+	bio_endio(bio);
+}
+
+static void update_process_bio(struct dm_update *u, struct bio *bio)
+{
+	struct update_entry *e;
+	struct dm_update_buffer *b;
+	uint64_t src;
+	int i;
+
+	e = bio->bi_private;
+
+	src = le32_to_cpu(e->src_lo) + ((uint64_t)le16_to_cpu(e->src_hi) << 32);
+	for (i = 0; i < N_BUFFERS; i++) {
+		b = &u->buffer[i];
+		if (b->status == B_EMPTY)
+			continue;
+		if (b->src == src) {
+			if (b->status == B_LOADING) {
+				bio_list_add(&b->waiting_bios, bio);
+			} else {
+				update_fill_from_buffer(u, b, bio);
+			}
+			return;
+		}
+	}
+	for (i = 0; i < N_BUFFERS; i++) {
+		b = &u->buffer[i];
+		if (b->status == B_EMPTY) {
+replace_buffer:
+			bio_list_add(&b->waiting_bios, bio);
+			b->status = B_LOADING;
+			b->src = src;
+			b->e = e;
+			queue_work(u->decompress_wq, &b->work);
+			return;
+		}
+	}
+	for (i = 0; i < N_BUFFERS; i++) {
+		b = &u->buffer[u->buffer_replacement];
+		u->buffer_replacement = (u->buffer_replacement + 1) % N_BUFFERS;
+		if (b->status == B_VALID)
+			goto replace_buffer;
+	}
+	bio_list_add(&u->waiting_bios, bio);
+}
+
+static void dm_update_get_locations(struct dm_update *u, struct update_entry *e, uint64_t *src, sector_t *sector, sector_t *n_sectors, size_t *front_pad, size_t *compressed_length)
+{
+	uint64_t next_src;
+	*src = le32_to_cpu(e->src_lo) + ((uint64_t)le16_to_cpu(e->src_hi) << 32);
+	do {
+		e++;
+		next_src = le32_to_cpu(e->src_lo) + ((uint64_t)le16_to_cpu(e->src_hi) << 32);
+	} while (next_src == *src);
+
+	*compressed_length = next_src - *src;
+	*front_pad = *src & (u->update_lbs - 1);
+	*sector = *src >> u->update_lbs_bits << (u->update_lbs_bits - SECTOR_SHIFT);
+	*n_sectors = round_up(*front_pad + *compressed_length, u->update_lbs) >> SECTOR_SHIFT;
+}
+
+static void dm_update_buffer_work(struct work_struct *w)
+{
+	struct dm_update_buffer *b = container_of(w, struct dm_update_buffer, work);
+	struct dm_update *u = b->u;
+	uint64_t src;
+	size_t front_pad, compressed_length;
+	sector_t sector, n_sectors;
+	struct bio *bio, *waiting_bios;
+	int r;
+
+	dm_update_get_locations(u, b->e, &src, &sector, &n_sectors, &front_pad, &compressed_length);
+
+	r = update_rw(u, false, REQ_OP_READ, sector, n_sectors, b->compressed_chunk);
+	if (unlikely(r))
+		goto io_error;
+
+	r = update_decompress(u, b->compressed_chunk + front_pad, compressed_length, b->decompressed_chunk, 1UL << u->sb->chunk_bits);
+	if (unlikely(r))
+		goto io_error;
+
+io_error:
+	mutex_lock(&u->mutex);
+	b->status = likely(!r) ? B_VALID : B_EMPTY;
+	while ((bio = bio_list_pop(&b->waiting_bios))) {
+		if (unlikely(r)) {
+			bio->bi_status = errno_to_blk_status(r);
+			bio_endio(bio);
+		} else {
+			update_fill_from_buffer(u, b, bio);
+		}
+	}
+
+	waiting_bios = bio_list_get(&u->waiting_bios);
+	while (waiting_bios != NULL) {
+		bio = waiting_bios;
+		waiting_bios = bio->bi_next;
+		bio->bi_next = NULL;
+		update_process_bio(u, bio);
+	}
+
+	mutex_unlock(&u->mutex);
+}
+
+static int update_map(struct dm_target *ti, struct bio *bio)
+{
+	struct dm_update *u = ti->private;
+	sector_t block;
+	size_t first, last, half;
+	struct update_entry *e;
+
+	if (bio_data_dir(bio) == WRITE)
+		return DM_MAPIO_KILL;
+
+	block = bio->bi_iter.bi_sector >> (u->sb->block_bits - SECTOR_SHIFT);
+
+	/* binary search in the directory, which is sorted by destination block */
+	first = 0;
+	last = le64_to_cpu(u->sb->dir_n) - 1;
+	while (first < last) {
+		sector_t test_block;
+		half = first / 2 + last / 2 + (first & last & 1);
+		e = &u->entries[half];
+		test_block = le32_to_cpu(e->dest_lo) + ((uint64_t)le16_to_cpu(e->dest_hi) << 32);
+		if (test_block == block)
+			goto found;
+		if (test_block < block) {
+			first = half + 1;
+		} else {
+			last = half;
+		}
+	}
+
+	bio_set_dev(bio, u->system_dev->bdev);
+	return DM_MAPIO_REMAPPED;
+
+found:
+	bio->bi_private = e;
+
+	mutex_lock(&u->mutex);
+	update_process_bio(u, bio);
+	mutex_unlock(&u->mutex);
+
+	return DM_MAPIO_SUBMITTED;
+}
+
+static void update_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen)
+{
+	struct dm_update *u = ti->private;
+	unsigned sz = 0;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		break;
+	case STATUSTYPE_TABLE:
+		DMEMIT("%s %s", u->system_dev->name, u->update_dev->name);
+		break;
+	case STATUSTYPE_IMA:
+		DMEMIT_TARGET_NAME_VERSION(ti->type);
+		DMEMIT(",update_system_device=%s", u->system_dev->name);
+		DMEMIT(",update_update_device=%s", u->update_dev->name);
+		DMEMIT(";");
+		break;
+	}
+}
+
+static int update_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_update *u = ti->private;
+
+	return fn(ti, u->system_dev, 0, ti->len, data);
+}
+
+/* copy one decompressed chunk to the system device, then re-queue itself */
+static void update_background_work(struct work_struct *w)
+{
+	struct dm_update *u = container_of(w, struct dm_update, bg_work);
+	uint64_t src;
+	size_t front_pad, compressed_length;
+	sector_t sector, n_sectors;
+	int r;
+
+	if (u->bg_index >= le64_to_cpu(u->sb->dir_n) - 1)
+		return;
+
+	dm_update_get_locations(u, &u->entries[u->bg_index], &src, &sector, &n_sectors, &front_pad, &compressed_length);
+
+	r = update_rw(u, false, REQ_OP_READ, sector, n_sectors, u->bg_compressed_chunk);
+	if (unlikely(r)) {
+		DMERR("error reading update device (%d), aborting background update", r);
+		return;
+	}
+
+	r = update_decompress(u, u->bg_compressed_chunk + front_pad, compressed_length, u->bg_decompressed_chunk, 1UL << u->sb->chunk_bits);
+	if (unlikely(r)) {
+		DMERR("error decompressing update data (%d), aborting background update", r);
+		return;
+	}
+
+	while (u->bg_index < le64_to_cpu(u->sb->dir_n) - 1) {
+		uint64_t s, dest;
+		size_t offset;
+		struct update_entry *e = &u->entries[u->bg_index];
+		s = le32_to_cpu(e->src_lo) + ((uint64_t)le16_to_cpu(e->src_hi) << 32);
+		if (s != src)
+			break;
+
+		dest = le32_to_cpu(e->dest_lo) + ((uint64_t)le16_to_cpu(e->dest_hi) << 32);
+		offset = (size_t)le32_to_cpu(e->offset) << u->sb->block_bits;
+
+		r = update_rw(u, true, REQ_OP_WRITE, dest << (u->sb->block_bits - SECTOR_SHIFT), 1UL << (u->sb->block_bits - SECTOR_SHIFT), u->bg_decompressed_chunk + offset);
+		if (unlikely(r)) {
+			DMERR("error writing system device (%d), aborting background update", r);
+			return;
+		}
+
+		u->bg_index++;
+	}
+
+	queue_work(u->bg_wq, &u->bg_work);
+}
+
+static void update_presuspend(struct dm_target *ti)
+{
+	struct dm_update *u = ti->private;
+	cancel_work_sync(&u->bg_work);
+}
+
+static void update_resume(struct dm_target *ti)
+{
+	struct dm_update *u = ti->private;
+	queue_work(u->bg_wq, &u->bg_work);
+}
+
+static void update_dtr(struct dm_target *ti)
+{
+	struct dm_update *u = ti->private;
+	int i;
+
+	if (u->bg_wq)
+		destroy_workqueue(u->bg_wq);
+	if (u->decompress_wq)
+		destroy_workqueue(u->decompress_wq);
+
+	vfree(u->bg_compressed_chunk);
+	vfree(u->bg_decompressed_chunk);
+
+	for (i = 0; i < N_BUFFERS; i++) {
+		vfree(u->buffer[i].compressed_chunk);
+		vfree(u->buffer[i].decompressed_chunk);
+	}
+	vfree(u->sb);
+	vfree(u->entries);
+	if (u->dm_io)
+		dm_io_client_destroy(u->dm_io);
+	if (u->system_dev)
+		dm_put_device(ti, u->system_dev);
+	if (u->update_dev)
+		dm_put_device(ti, u->update_dev);
+	if (u->cc)
+		crypto_free_comp(u->cc);
+
+	mutex_init(&u->mutex);
+
+	kfree(u);
+}
+
+static bool validate_sb(struct dm_update *u)
+{
+	struct update_superblock *sb = u->sb;
+
+	if (sb->block_bits < SECTOR_SHIFT || sb->block_bits >= 64 + SECTOR_SHIFT)
+		return false;
+
+	if (sb->chunk_bits < sb->block_bits || sb->chunk_bits >= 31)
+		return false;
+
+	if (le64_to_cpu(sb->dir_n) < 1)
+		return false;
+
+	return true;
+}
+
+static int update_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+	struct dm_update *u;
+	int r;
+	uint64_t compressed_dir_size;
+	void *compressed_dir = NULL;
+	size_t dst_len;
+	int i;
+	size_t o;
+	sector_t max_compressed_sectors;
+
+	u = kzalloc(sizeof(struct dm_update), GFP_KERNEL);
+	if (!u) {
+		ti->error = "Cannot allocate dm_update structure";
+		return -ENOMEM;
+	}
+
+	ti->private = u;
+	u->ti = ti;
+
+	mutex_init(&u->mutex);
+	bio_list_init(&u->waiting_bios);
+	INIT_WORK(&u->bg_work, update_background_work);
+
+	u->decompress_wq = alloc_workqueue("dm-update", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0);
+	if (!u->decompress_wq) {
+		ti->error = "Cannot allocate workqueue";
+		r = -ENOMEM;
+		goto bad;
+	}
+	u->bg_wq = alloc_workqueue("dm-update-background", WQ_CPU_INTENSIVE | WQ_UNBOUND, 1);
+	if (!u->bg_wq) {
+		ti->error = "Cannot allocate workqueue";
+		r = -ENOMEM;
+		goto bad;
+	}
+
+	if (argc < 2) {
+		ti->error = "Not enough arguments";
+		r = -EINVAL;
+		goto bad;
+	}
+
+	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &u->system_dev);
+	if (r) {
+		ti->error = "System device lookup failed";
+		goto bad;
+	}
+
+	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &u->update_dev);
+	if (r) {
+		ti->error = "Update device lookup failed";
+		goto bad;
+	}
+
+	u->update_lbs = bdev_logical_block_size(u->update_dev->bdev);
+	u->update_lbs_bits = __ffs(u->update_lbs);
+
+	u->dm_io = dm_io_client_create();
+	if (IS_ERR(u->dm_io)) {
+		r = PTR_ERR(u->dm_io);
+		u->dm_io = NULL;
+		ti->error = "Unable to allocate dm-io client";
+		goto bad;
+	}
+
+	u->sb = vmalloc(u->update_lbs);
+	if (!u->sb) {
+		r = -ENOMEM;
+		ti->error = "Cannot allocate superblock";
+		goto bad;
+	}
+
+	r = update_rw(u, false, REQ_OP_READ, 0, u->update_lbs >> SECTOR_SHIFT, u->sb);
+	if (r) {
+		ti->error = "Cannot read superblock";
+		goto bad;
+	}
+
+	if (memcmp(u->sb->magic, UPDATE_MAGIC, 8)) {
+		r = -EINVAL;
+		ti->error = "Invalid magic in the superblock";
+		//printk("%02x %02x %02x %02x %02x %02x %02x %02x\n", u->sb->magic[0], u->sb->magic[1], u->sb->magic[2], u->sb->magic[3], u->sb->magic[4], u->sb->magic[5], u->sb->magic[6], u->sb->magic[7]);
+		goto bad;
+	}
+
+	if (u->sb->version != UPDATE_VERSION) {
+		r = -EINVAL;
+		ti->error = "Invalid version in the superblock";
+		goto bad;
+	}
+
+	if (!validate_sb(u)) {
+		r = -EINVAL;
+		ti->error = "Invalid values in the superblock";
+		goto bad;
+	}
+
+	r = dm_set_target_max_io_len(ti, (sector_t)1 << (u->sb->block_bits - SECTOR_SHIFT));
+	if (r) {
+		ti->error = "Invalid block size in the superblock";
+		goto bad;
+	}
+
+	if (!memchr(u->sb->compression, 0, sizeof u->sb->compression)) {
+		r = -EINVAL;
+		ti->error = "Invalid compression algorithm in the superblock";
+		goto bad;
+	}
+	if (strcmp(u->sb->compression, "none")) {
+		u->cc = crypto_alloc_comp(u->sb->compression, 0, 0);
+		if (!u->cc)
+			u->cc = ERR_PTR(-ENOMEM);
+		if (IS_ERR(u->cc)) {
+			r = PTR_ERR(u->cc);
+			u->cc = NULL;
+			ti->error = "Unsupported compression method";
+			goto bad;
+		}
+	}
+
+	compressed_dir_size = roundup((le64_to_cpu(u->sb->dir_offset) & (u->update_lbs - 1)) + le64_to_cpu(u->sb->dir_compressed_size), u->update_lbs);
+	if (compressed_dir_size != (size_t)compressed_dir_size) {
+		r = -EOVERFLOW;
+		ti->error = "Compressed directory is too large for 32-bit system";
+		goto bad;
+	}
+
+	compressed_dir = vmalloc(compressed_dir_size);
+	if (!compressed_dir) {
+		r = -ENOMEM;
+		ti->error = "Cannot allocate compressed directory";
+		goto bad;
+	}
+
+	r = update_rw(u, false, REQ_OP_READ, round_down(le64_to_cpu(u->sb->dir_offset), u->update_lbs) >> SECTOR_SHIFT, compressed_dir_size >> SECTOR_SHIFT, compressed_dir);
+	if (r) {
+		ti->error = "Cannot read compressed directory";
+		goto bad;
+	}
+
+	dst_len = le64_to_cpu(u->sb->dir_n) * sizeof(struct update_entry);
+	if (dst_len / sizeof(struct update_entry) != le64_to_cpu(u->sb->dir_n)) {
+		r = -EOVERFLOW;
+		ti->error = "Decompressed directory is too large for 32-bit system";
+		goto bad;
+	}
+	u->entries = vmalloc(dst_len);
+	if (!u->entries) {
+		r = -ENOMEM;
+		ti->error = "Cannot allocate decompressed directory";
+		goto bad;
+	}
+
+	r = update_decompress(u, compressed_dir + (le64_to_cpu(u->sb->dir_offset) & (u->update_lbs - 1)), le64_to_cpu(u->sb->dir_compressed_size), (void *)u->entries, dst_len);
+	if (r) {
+		ti->error = "Cannot decompress directory";
+		goto bad;
+	}
+
+	if (dst_len != le64_to_cpu(u->sb->dir_n) * sizeof(struct update_entry)) {
+		r = -EINVAL;
+		ti->error = "Non-matching length of compressed directory";
+		goto bad;
+	}
+
+	vfree(compressed_dir);
+	compressed_dir = NULL;
+
+	o = 0;
+	max_compressed_sectors = 1;
+	while (o < le64_to_cpu(u->sb->dir_n) - 1) {
+		struct update_entry *e = &u->entries[o];
+		uint64_t src, s;
+		size_t front_pad, compressed_length;
+		sector_t sector, n_sectors;
+		dm_update_get_locations(u, e, &src, &sector, &n_sectors, &front_pad, &compressed_length);
+		if (n_sectors > max_compressed_sectors)
+			max_compressed_sectors = n_sectors;
+		do {
+			o++;
+			if (o >= le64_to_cpu(u->sb->dir_n) - 1)
+				break;
+			e = &u->entries[o];
+			s = le32_to_cpu(e->src_lo) + ((uint64_t)le16_to_cpu(e->src_hi) << 32);
+		} while (s == src);
+	}
+
+	for (i = 0; i < N_BUFFERS; i++) {
+		struct dm_update_buffer *b = &u->buffer[i];
+		b->decompressed_chunk = vmalloc(1UL << u->sb->chunk_bits);
+		if (!b->decompressed_chunk) {
+			r = -ENOMEM;
+			ti->error = "Cannot allocate buffers";
+			goto bad;
+		}
+		memset(b->decompressed_chunk, 0xfe, 1UL << u->sb->chunk_bits);	/* !!! FIXME: debug */
+		b->compressed_chunk = vmalloc(max_compressed_sectors << SECTOR_SHIFT);
+		if (!b->compressed_chunk) {
+			r = -ENOMEM;
+			ti->error = "Cannot allocate buffers";
+			goto bad;
+		}
+		memset(b->compressed_chunk, 0xfd, max_compressed_sectors << SECTOR_SHIFT);	/* !!! FIXME: debug */
+		bio_list_init(&b->waiting_bios);
+		INIT_WORK(&b->work, dm_update_buffer_work);
+		b->u = u;
+	}
+
+	u->bg_decompressed_chunk = vmalloc(1UL << u->sb->chunk_bits);
+	if (!u->bg_decompressed_chunk) {
+		r = -ENOMEM;
+		ti->error = "Cannot allocate buffers";
+		goto bad;
+	}
+	u->bg_compressed_chunk = vmalloc(max_compressed_sectors << SECTOR_SHIFT);
+	if (!u->bg_compressed_chunk) {
+		r = -ENOMEM;
+		ti->error = "Cannot allocate buffers";
+		goto bad;
+	}
+
+	return 0;
+
+bad:
+	if (compressed_dir)
+		vfree(compressed_dir);
+	update_dtr(ti);
+	return r;
+}
+
+static struct target_type update_target = {
+	.name = "update",
+	.version = {1, 0, 0},
+	.module = THIS_MODULE,
+	.ctr = update_ctr,
+	.dtr = update_dtr,
+	.map = update_map,
+	.status = update_status,
+	.iterate_devices = update_iterate_devices,
+	.presuspend = update_presuspend,
+	.resume = update_resume,
+};
+
+static int __init dm_update_init(void)
+{
+	int r;
+
+	r = dm_register_target(&update_target);
+	if (r < 0) {
+		DMERR("register failed %d", r);
+		return r;
+	}
+
+	return 0;
+}
+
+static void __exit dm_update_exit(void)
+{
+	dm_unregister_target(&update_target);
+}
+
+module_init(dm_update_init);
+module_exit(dm_update_exit);
+
+MODULE_DESCRIPTION(DM_NAME " update target");
+MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
+MODULE_LICENSE("GPL");
Index: linux-2.6/drivers/md/dm-update.h
===================================================================
--- /dev/null
+++ linux-2.6/drivers/md/dm-update.h
@@ -0,0 +1,24 @@
+#define UPDATE_MAGIC "update\0"
+#define UPDATE_VERSION 0
+
+struct update_superblock {
+	char magic[8];			/* UPDATE_MAGIC */
+	uint8_t version;		/* UPDATE_VERSION */
+	uint8_t block_bits;		/* log2 of the block size in bytes */
+	uint8_t chunk_bits;		/* log2 of the decompressed chunk size in bytes */
+	uint8_t pad1;
+	__le32 pad2;
+	char compression[16];		/* crypto compression algorithm name, or "none" */
+	__le64 dir_offset;		/* byte offset of the compressed directory on the update device */
+	__le64 dir_compressed_size;	/* compressed size of the directory in bytes */
+	__le64 dir_n;			/* number of directory entries, including the terminating entry */
+	__le64 pad;
+};
+
+struct update_entry {
+	__le32 dest_lo;			/* destination block on the system device, low 32 bits */
+	__le16 dest_hi;			/* destination block, high 16 bits */
+	__le16 src_hi;			/* byte offset of the compressed chunk on the update device, high 16 bits */
+	__le32 src_lo;			/* byte offset of the compressed chunk, low 32 bits */
+	__le32 offset;			/* block offset of this block inside the decompressed chunk */
+};
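
Usage sketch (hypothetical device names, not part of the patch): the constructor takes the writable system device first and the device holding the update image second, and the table length should cover the whole system device, so a mapping can be loaded with dmsetup like this:

  # /dev/sdb = system device, /dev/sdc = device holding the update image
  dmsetup create updated --table "0 $(blockdev --getsz /dev/sdb) update /dev/sdb /dev/sdc"

Reads of blocks listed in the update directory are then served from decompressed chunks of the update image, all other reads are remapped to the system device, writes are rejected, and the background worker gradually copies the updated blocks onto the system device.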