From 28cc025a7aa18dbbae0fd0db7b7ab7fa96f5257f Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Wed, 18 Apr 2012 20:51:43 -0400
Subject: [PATCH 1/3] dm thin: support for non power of 2 blocksize

---
 drivers/md/dm-thin.c |   80 +++++++++++++++++++++++++++++++++++++++++--------
 1 files changed, 67 insertions(+), 13 deletions(-)

diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 301db0f..fa68cb6 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -673,9 +673,61 @@ static void requeue_io(struct thin_c *tc)
  * target.
  */
 
+/*
+ * do_div variants for power-of-2 and non-power-of-2 pool block_size
+ */
+static inline sector_t pool_block_pow2_do_div(sector_t a, struct pool *pool)
+{
+	return (a >> pool->block_shift);
+}
+
+static inline sector_t pool_block_non_pow2_do_div(sector_t a, struct pool *pool)
+{
+	sector_t r = a;
+
+	do_div(r, pool->sectors_per_block);
+	return r;
+}
+
+static inline sector_t pool_block_do_div(sector_t a, struct pool *pool)
+{
+	if (pool->block_shift)
+		return pool_block_pow2_do_div(a, pool);
+	else
+		return pool_block_non_pow2_do_div(a, pool);
+}
+
+/*
+ * do_mod variants for power-of-2 and non-power-of-2 pool block_size
+ */
+static inline sector_t pool_block_pow2_do_mod(sector_t a, struct pool *pool)
+{
+	return (a & pool->offset_mask);
+}
+
+static inline sector_t non_pow2_do_mod(sector_t a, __u32 b)
+{
+	sector_t r = a;
+
+	return do_div(r, b);
+}
+
+static inline sector_t pool_block_non_pow2_do_mod(sector_t a, struct pool *pool)
+{
+	return non_pow2_do_mod(a, pool->sectors_per_block);
+}
+
+static inline sector_t pool_block_do_mod(sector_t a, struct pool *pool)
+{
+	if (pool->block_shift)
+		return pool_block_pow2_do_mod(a, pool);
+	else
+		return pool_block_non_pow2_do_mod(a, pool);
+}
+
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
-	return bio->bi_sector >> tc->pool->block_shift;
+	return pool_block_do_div(bio->bi_sector, tc->pool);
 }
 
 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
@@ -683,8 +735,8 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 	struct pool *pool = tc->pool;
 
 	bio->bi_bdev = tc->pool_dev->bdev;
-	bio->bi_sector = (block << pool->block_shift) +
-		(bio->bi_sector & pool->offset_mask);
+	bio->bi_sector = (block * pool->sectors_per_block) +
+		pool_block_do_mod(bio->bi_sector, pool);
 }
 
 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -929,9 +981,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
  */
 static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-	return !(bio->bi_sector & pool->offset_mask) &&
+	return !pool_block_do_mod(bio->bi_sector, pool) &&
 		(bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
-
 }
 
 static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1684,7 +1735,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 		return (struct pool *)pmd;
 	}
 
-	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 	if (!pool) {
 		*error = "Error allocating memory for pool";
 		err_p = ERR_PTR(-ENOMEM);
@@ -1693,8 +1744,10 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
 	pool->pmd = pmd;
 	pool->sectors_per_block = block_size;
-	pool->block_shift = ffs(block_size) - 1;
-	pool->offset_mask = block_size - 1;
+	if (is_power_of_2(block_size)) {
+		pool->block_shift = ffs(block_size) - 1;
+		pool->offset_mask = block_size - 1;
+	}
 	pool->low_water_blocks = 0;
 	pool_features_init(&pool->pf);
 	pool->prison = prison_create(PRISON_CELLS);
@@ -1938,7 +1991,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
 	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
 	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
-	    !is_power_of_2(block_size)) {
+	    non_pow2_do_mod(block_size, 128)) {
 		ti->error = "Invalid block size";
 		r = -EINVAL;
 		goto out;
@@ -2086,7 +2139,7 @@ static int pool_preresume(struct dm_target *ti)
 	if (r)
 		return r;
 
-	data_size = ti->len >> pool->block_shift;
+	data_size = pool_block_do_div(ti->len, pool);
 	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
 	if (r) {
 		DMERR("failed to retrieve data device size");
@@ -2695,17 +2748,18 @@ static int thin_iterate_devices(struct dm_target *ti,
 {
 	dm_block_t blocks;
 	struct thin_c *tc = ti->private;
+	struct pool *pool = tc->pool;
 
 	/*
 	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
 	 * we follow a more convoluted path through to the pool's target.
 	 */
-	if (!tc->pool->ti)
+	if (!pool->ti)
 		return 0;	/* nothing is bound */
 
-	blocks = tc->pool->ti->len >> tc->pool->block_shift;
+	blocks = pool_block_do_div(pool->ti->len, pool);
 	if (blocks)
-		return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data);
+		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
 
 	return 0;
 }
-- 
1.7.1
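
For readers unfamiliar with the change, here is a small user-space sketch of the
dispatch that the new pool_block_do_div()/pool_block_do_mod() helpers implement:
shift/mask when the block size is a power of two, real division otherwise. It is
illustrative only, not part of the patch; the struct and function names below are
invented for the example, and ordinary C division stands in for the kernel's
do_div() (which exists because sector_t may be 64-bit on 32-bit machines).

/* Illustrative sketch only; names here are made up for the example. */
#include <stdint.h>
#include <stdio.h>

struct blk_geom {
	uint32_t sectors_per_block;
	unsigned int block_shift;	/* 0 => not a power of two */
	uint32_t offset_mask;		/* only valid when block_shift != 0 */
};

static void blk_geom_init(struct blk_geom *g, uint32_t sectors)
{
	g->sectors_per_block = sectors;
	g->block_shift = 0;
	g->offset_mask = 0;
	if (sectors && !(sectors & (sectors - 1))) {	/* is_power_of_2() */
		g->block_shift = __builtin_ffs(sectors) - 1;
		g->offset_mask = sectors - 1;
	}
}

static uint64_t blk_div(const struct blk_geom *g, uint64_t sector)
{
	/* power-of-2 fast path, else real division */
	return g->block_shift ? sector >> g->block_shift
			      : sector / g->sectors_per_block;
}

static uint64_t blk_mod(const struct blk_geom *g, uint64_t sector)
{
	return g->block_shift ? (sector & g->offset_mask)
			      : sector % g->sectors_per_block;
}

int main(void)
{
	struct blk_geom pow2, odd;

	blk_geom_init(&pow2, 1024);	/* power of two: uses shift/mask */
	blk_geom_init(&odd, 384);	/* multiple of 128, not a power of two */

	/* 2500 / 1024 = block 2, offset 452; 2500 / 384 = block 6, offset 196 */
	printf("pow2: block %llu offset %llu\n",
	       (unsigned long long)blk_div(&pow2, 2500),
	       (unsigned long long)blk_mod(&pow2, 2500));
	printf("odd:  block %llu offset %llu\n",
	       (unsigned long long)blk_div(&odd, 2500),
	       (unsigned long long)blk_mod(&odd, 2500));
	return 0;
}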