From: Joe Thornber
Subject: dm cache: promotion optimisation for writes

If a write triggers promotion and covers a whole block, we can avoid
a copy.

FIXME:

  CC [M]  drivers/md/dm-cache-target.o
drivers/md/dm-cache-target.c: In function ‘issue_copy’:
drivers/md/dm-cache-target.c:974: warning: unused variable ‘bio’
drivers/md/dm-cache-target.c: At top level:
drivers/md/dm-cache-target.c:943: warning: ‘issue_overwrite’ defined but not used
drivers/md/dm-cache-target.c:953: warning: ‘bio_writes_complete_block’ defined but not used

Signed-off-by: Joe Thornber
Signed-off-by: Mike Snitzer
---
 drivers/md/dm-cache-target.c | 58 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 56 insertions(+), 2 deletions(-)

Index: linux/drivers/md/dm-cache-target.c
===================================================================
--- linux.orig/drivers/md/dm-cache-target.c
+++ linux/drivers/md/dm-cache-target.c
@@ -256,6 +256,7 @@ struct dm_cache_migration {
 	bool writeback:1;
 	bool demote:1;
 	bool promote:1;
+	bool requeue_holder:1;
 
 	struct dm_bio_prison_cell *old_ocell;
 	struct dm_bio_prison_cell *new_ocell;
@@ -865,7 +866,12 @@ static void migration_success_post_commi
 			cleanup_migration(mg);
 
 	} else {
-		cell_defer(cache, mg->new_ocell, true);
+		if (mg->requeue_holder)
+			cell_defer(cache, mg->new_ocell, true);
+		else {
+			bio_endio(mg->new_ocell->holder, 0);
+			cell_defer(cache, mg->new_ocell, false);
+		}
 		clear_dirty(cache, mg->new_oblock, mg->cblock);
 		cleanup_migration(mg);
 	}
@@ -914,6 +920,42 @@ static void issue_copy_real(struct dm_ca
 		migration_failure(mg);
 }
 
+static void overwrite_endio(struct bio *bio, int err)
+{
+	struct dm_cache_migration *mg = bio->bi_private;
+	struct cache *cache = mg->cache;
+	size_t pb_data_size = get_per_bio_data_size(cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+	unsigned long flags;
+
+	if (err)
+		mg->err = true;
+
+	spin_lock_irqsave(&cache->lock, flags);
+	list_add_tail(&mg->list, &cache->completed_migrations);
+	unhook_bio(&pb->hook_info, bio);
+	mg->requeue_holder = false;
+	spin_unlock_irqrestore(&cache->lock, flags);
+
+	wake_worker(cache);
+}
+
+static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
+{
+	size_t pb_data_size = get_per_bio_data_size(mg->cache);
+	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+
+	hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
+	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
+	generic_make_request(bio);
+}
+
+static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+{
+	return (bio_data_dir(bio) == WRITE) &&
+		(bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+}
+
 static void avoid_copy(struct dm_cache_migration *mg)
 {
 	atomic_inc(&mg->cache->stats.copies_avoided);
@@ -928,8 +970,17 @@ static void issue_copy(struct dm_cache_m
 	if (mg->writeback || mg->demote)
 		avoid = !is_dirty(cache, mg->cblock) ||
 			is_discarded_oblock(cache, mg->old_oblock);
-	else
+	else {
+		struct bio *bio = mg->new_ocell->holder;
+
 		avoid = is_discarded_oblock(cache, mg->new_oblock);
+#if 0
+		if (!avoid && bio_writes_complete_block(cache, bio)) {
+			issue_overwrite(mg, bio);
+			return;
+		}
+#endif
+	}
 
 	avoid ? avoid_copy(mg) : issue_copy_real(mg);
 }
@@ -1020,6 +1071,7 @@ static void promote(struct cache,
 	mg->writeback = false;
 	mg->demote = false;
 	mg->promote = true;
+	mg->requeue_holder = true;
 	mg->cache = cache;
 	mg->new_oblock = oblock;
 	mg->cblock = cblock;
@@ -1041,6 +1093,7 @@ static void writeback(struct cach
 	mg->writeback = true;
 	mg->demote = false;
 	mg->promote = false;
+	mg->requeue_holder = true;
 	mg->cache = cache;
 	mg->old_oblock = oblock;
 	mg->cblock = cblock;
@@ -1064,6 +1117,7 @@ static void demote_then_promote(struct c
 	mg->writeback = false;
 	mg->demote = true;
 	mg->promote = true;
+	mg->requeue_holder = true;
 	mg->cache = cache;
 	mg->old_oblock = old_oblock;
 	mg->new_oblock = new_oblock;
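
A note for reviewers on the two pieces the patch adds.
bio_writes_complete_block() decides whether the holder bio alone can
populate the cache block: only a WRITE whose bi_size equals the cache
block size (sectors_per_block shifted into bytes) qualifies, since a
smaller bio would leave the rest of the block stale.  The
requeue_holder flag then lets migration_success_post_commit()
distinguish the overwrite path, where the holder bio was already
completed by bio_endio(), from the normal path, where the holder must
be requeued through the cell.  Below is a minimal userspace sketch of
just the size test; struct bio_stub, its fields, and
writes_complete_block() are stand-ins invented for illustration, not
the kernel's types.

	/*
	 * Illustrative sketch only, not part of the patch.  Shows why
	 * bio_writes_complete_block() compares bi_size against
	 * sectors_per_block << SECTOR_SHIFT: SECTOR_SHIFT is 9
	 * (512-byte sectors), so a cache block of 8 sectors is
	 * overwritten in full only by a 8 << 9 == 4096-byte WRITE.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SECTOR_SHIFT 9
	#define WRITE 1

	struct bio_stub {
		int dir;          /* stands in for bio_data_dir(bio) */
		uint32_t bi_size; /* bytes carried by the bio */
	};

	static bool writes_complete_block(uint32_t sectors_per_block,
					  const struct bio_stub *bio)
	{
		return bio->dir == WRITE &&
		       bio->bi_size == (sectors_per_block << SECTOR_SHIFT);
	}

	int main(void)
	{
		struct bio_stub full = { WRITE, 8 << SECTOR_SHIFT }; /* 4096 bytes */
		struct bio_stub partial = { WRITE, 512 };            /* one sector */

		printf("full block: %d\n", writes_complete_block(8, &full));    /* 1 */
		printf("partial:    %d\n", writes_complete_block(8, &partial)); /* 0 */
		return 0;
	}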