---
 drivers/md/dm-bufio.c | 63 ++++++++++++++++++++++++++++++++++----------------
 drivers/md/dm-bufio.h |  8 ++++++
 2 files changed, 52 insertions(+), 19 deletions(-)

Index: linux-4.6/drivers/md/dm-bufio.c
===================================================================
--- linux-4.6.orig/drivers/md/dm-bufio.c	2016-05-20 17:22:34.000000000 +0200
+++ linux-4.6/drivers/md/dm-bufio.c	2016-05-20 19:41:16.000000000 +0200
@@ -109,6 +109,9 @@ struct dm_bufio_client {
 	struct rb_root buffer_tree;
 	wait_queue_head_t free_buffer_wait;
 
+	sector_t start;
+	sector_t limit;
+
 	int async_write_error;
 
 	struct list_head client_list;
@@ -569,8 +572,8 @@ static void dmio_complete(unsigned long
 	b->bio.bi_end_io(&b->bio);
 }
 
-static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
-		     bio_end_io_t *end_io)
+static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+		     unsigned n_sectors, bio_end_io_t *end_io)
 {
 	int r;
 	struct dm_io_request io_req = {
@@ -581,8 +584,8 @@ static void use_dmio(struct dm_buffer *b
 	};
 	struct dm_io_region region = {
 		.bdev = b->c->bdev,
-		.sector = block << b->c->sectors_per_block_bits,
-		.count = b->c->block_size >> SECTOR_SHIFT,
+		.sector = sector,
+		.count = n_sectors,
 	};
 
 	if (b->data_mode != DATA_MODE_VMALLOC) {
@@ -617,8 +620,8 @@ static void inline_endio(struct bio *bio
 	end_fn(bio);
 }
 
-static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
-			   bio_end_io_t *end_io)
+static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
+			   unsigned n_sectors, bio_end_io_t *end_io)
 {
 	char *ptr;
 	int len;
@@ -626,7 +629,7 @@ static void use_inline_bio(struct dm_buf
 	bio_init(&b->bio);
 	b->bio.bi_io_vec = b->bio_vec;
 	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
-	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
+	b->bio.bi_iter.bi_sector = sector;
 	b->bio.bi_bdev = b->c->bdev;
 	b->bio.bi_end_io = inline_endio;
 	/*
@@ -640,7 +643,7 @@ static void use_inline_bio(struct dm_buf
 	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
 	 */
 	ptr = b->data;
-	len = b->c->block_size;
+	len = n_sectors << SECTOR_SHIFT;
 
 	if (len >= PAGE_SIZE)
 		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
@@ -652,7 +655,7 @@ static void use_inline_bio(struct dm_buf
 				  len < PAGE_SIZE ? len : PAGE_SIZE,
 				  offset_in_page(ptr))) {
 			BUG_ON(b->c->block_size <= PAGE_SIZE);
-			use_dmio(b, rw, block, end_io);
+			use_dmio(b, rw, sector, n_sectors, end_io);
 			return;
 		}
 
@@ -663,17 +666,31 @@ static void use_inline_bio(struct dm_buf
 	submit_bio(rw, &b->bio);
 }
 
-static void submit_io(struct dm_buffer *b, int rw, sector_t block,
-		      bio_end_io_t *end_io)
+static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
 {
+	unsigned n_sectors;
+	sector_t sector, limit;
+
 	if (rw == WRITE && b->c->write_callback)
 		b->c->write_callback(b);
 
-	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
+	sector = b->block << b->c->sectors_per_block_bits;
+	n_sectors = 1 << b->c->sectors_per_block_bits;
+	limit = b->c->limit;
+	if (unlikely(limit != 0)) {
+		if (unlikely(sector + n_sectors >= limit) ||
+		    unlikely(!(sector + n_sectors))) {
+			BUG_ON(sector >= limit);
+			n_sectors = limit - sector;
+		}
+	}
+	sector += b->c->start;
+
+	if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
 	    b->data_mode != DATA_MODE_VMALLOC)
-		use_inline_bio(b, rw, block, end_io);
+		use_inline_bio(b, rw, sector, n_sectors, end_io);
 	else
-		use_dmio(b, rw, block, end_io);
+		use_dmio(b, rw, sector, n_sectors, end_io);
 }
 
 /*----------------------------------------------------------------
@@ -725,7 +742,7 @@ static void __write_dirty_buffer(struct
 	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 
 	if (!write_list)
-		submit_io(b, WRITE, b->block, write_endio);
+		submit_io(b, WRITE, write_endio);
 	else
 		list_add_tail(&b->write_list, write_list);
 }
@@ -738,7 +755,7 @@ static void __flush_write_list(struct li
 		struct dm_buffer *b =
 			list_entry(write_list->next, struct dm_buffer, write_list);
 		list_del(&b->write_list);
-		submit_io(b, WRITE, b->block, write_endio);
+		submit_io(b, WRITE, write_endio);
 		dm_bufio_cond_resched();
 	}
 	blk_finish_plug(&plug);
@@ -1095,7 +1112,7 @@ static void *new_read(struct dm_bufio_cl
 		return NULL;
 
 	if (need_submit)
-		submit_io(b, READ, b->block, read_endio);
+		submit_io(b, READ, read_endio);
 
 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
@@ -1165,7 +1182,7 @@ void dm_bufio_prefetch(struct dm_bufio_c
 			dm_bufio_unlock(c);
 
 			if (need_submit)
-				submit_io(b, READ, b->block, read_endio);
+				submit_io(b, READ, read_endio);
 			dm_bufio_release(b);
 
 			dm_bufio_cond_resched();
@@ -1405,7 +1422,7 @@ retry:
 		old_block = b->block;
 		__unlink_buffer(b);
 		__link_buffer(b, new_block, b->list_mode);
-		submit_io(b, WRITE, new_block, write_endio);
+		submit_io(b, WRITE, write_endio);
 		wait_on_bit_io(&b->state, B_WRITING,
 			       TASK_UNINTERRUPTIBLE);
 		__unlink_buffer(b);
@@ -1771,6 +1788,14 @@ void dm_bufio_client_destroy(struct dm_b
 }
 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
 
+void dm_bufio_set_sector_range(struct dm_bufio_client *c, sector_t start,
+			       sector_t n_sectors)
+{
+	c->start = start;
+	c->limit = n_sectors;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_set_sector_range);
+
 static unsigned get_max_age_hz(void)
 {
 	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
Index: linux-4.6/drivers/md/dm-bufio.h
===================================================================
--- linux-4.6.orig/drivers/md/dm-bufio.h	2016-05-20 17:22:34.000000000 +0200
+++ linux-4.6/drivers/md/dm-bufio.h	2016-05-20 19:41:08.000000000 +0200
@@ -32,6 +32,14 @@ dm_bufio_client_create(struct block_devi
 void dm_bufio_client_destroy(struct dm_bufio_client *c);
 
 /*
+ * Set the sector range.
+ * When this function is called, there must be no I/O in progress on the bufio
+ * client.
+ */
+void dm_bufio_set_sector_range(struct dm_bufio_client *c, sector_t start,
+			       sector_t n_sectors);
+
+/*
  * WARNING: to avoid deadlocks, these conditions are observed:
  *
  * - At most one thread can hold at most "reserved_buffers" simultaneously.
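
For reference, here is a minimal usage sketch of the new interface; it is not part of the patch. A caller creates a bufio client with the existing dm_bufio_client_create() and then confines it to a sub-range of the underlying device. The names example_open_bufio(), EXAMPLE_BLOCK_SIZE and DATA_START_SECTORS are made up for illustration; only dm_bufio_client_create(), dm_bufio_client_destroy() and dm_bufio_set_sector_range() are real dm-bufio API.

/*
 * Illustrative sketch only -- not part of the patch.  Assumes the dm-bufio
 * interface as of this patch; example_open_bufio(), EXAMPLE_BLOCK_SIZE and
 * DATA_START_SECTORS are hypothetical names.
 */
#include <linux/err.h>
#include "dm-bufio.h"

#define EXAMPLE_BLOCK_SIZE	4096	/* bufio block size in bytes */
#define DATA_START_SECTORS	256	/* offset of the data area, in 512-byte sectors */

static int example_open_bufio(struct block_device *bdev, sector_t data_sectors,
			      struct dm_bufio_client **client)
{
	struct dm_bufio_client *c;

	c = dm_bufio_client_create(bdev, EXAMPLE_BLOCK_SIZE, 1, 0, NULL, NULL);
	if (IS_ERR(c))
		return PTR_ERR(c);

	/*
	 * Must be called while no I/O is in progress on the client.
	 * Block 0 of the client now maps to device sector DATA_START_SECTORS,
	 * and submit_io() trims a buffer that would extend past data_sectors
	 * (counted from block 0 of the client) down to that limit.
	 */
	dm_bufio_set_sector_range(c, DATA_START_SECTORS, data_sectors);

	*client = c;
	return 0;
}

Since the client structure is allocated with kzalloc(), start and limit default to 0, which submit_io() treats as "no offset, no limit", so existing dm-bufio users are unaffected unless they call the new function.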