dm-optimize: introduce get/put_live_table_fast

Normal RCU is faster than sleepable RCU (SRCU). This patch introduces two
new functions, dm_get_live_table_fast and dm_put_live_table_fast. They can
be used to get and release the live table pointer, but the caller must not
sleep between the two calls.

When we change the map pointer, we call synchronize_rcu_expedited to
synchronize against dm_get_live_table_fast/dm_put_live_table_fast.

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm.c |   32 +++++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)

Index: linux-3.3-fast/drivers/md/dm.c
===================================================================
--- linux-3.3-fast.orig/drivers/md/dm.c	2012-04-29 04:36:37.000000000 +0200
+++ linux-3.3-fast/drivers/md/dm.c	2012-04-29 05:18:12.000000000 +0200
@@ -563,6 +563,21 @@ void dm_put_live_table(struct mapped_dev
 }
 
 /*
+ * A fast alternative to dm_get_live_table/dm_put_live_table.
+ * The caller must not block between these two functions.
+ */
+static struct dm_table *dm_get_live_table_fast(struct mapped_device *md)
+{
+	rcu_read_lock();
+	return rcu_dereference(md->map);
+}
+
+static void dm_put_live_table_fast(struct mapped_device *md)
+{
+	rcu_read_unlock();
+}
+
+/*
  * Get the geometry associated with a dm device
  */
 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
@@ -1348,8 +1363,7 @@ static int dm_merge_bvec(struct request_
 			 struct bio_vec *biovec)
 {
 	struct mapped_device *md = q->queuedata;
-	int srcu_idx;
-	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+	struct dm_table *map = dm_get_live_table_fast(md);
 	struct dm_target *ti;
 	sector_t max_sectors;
 	int max_size = 0;
@@ -1389,7 +1403,7 @@ static int dm_merge_bvec(struct request_
 	max_size = 0;
 
 out:
-	dm_put_live_table(md, srcu_idx);
+	dm_put_live_table_fast(md);
 	/*
 	 * Always allow an entire first page
 	 */
@@ -1681,15 +1695,14 @@ static int dm_lld_busy(struct request_qu
 {
 	int r;
 	struct mapped_device *md = q->queuedata;
-	int srcu_idx;
-	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+	struct dm_table *map = dm_get_live_table_fast(md);
 
 	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
 		r = 1;
 	else
 		r = dm_table_any_busy_target(map);
 
-	dm_put_live_table(md, srcu_idx);
+	dm_put_live_table_fast(md);
 
 	return r;
 }
@@ -1701,8 +1714,7 @@ static int dm_any_congested(void *conges
 	struct dm_table *map;
 
 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
-		int srcu_idx;
-		map = dm_get_live_table(md, &srcu_idx);
+		map = dm_get_live_table_fast(md);
 		if (map) {
 			/*
 			 * Request-based dm cares about only own queue for
@@ -1714,7 +1726,7 @@ static int dm_any_congested(void *conges
 			else
 				r = dm_table_any_congested(map, bdi_bits);
 		}
-		dm_put_live_table(md, srcu_idx);
+		dm_put_live_table_fast(md);
 	}
 
 	return r;
@@ -2106,6 +2118,7 @@ static struct dm_table *__bind(struct ma
 	else
 		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
 	synchronize_srcu(&md->io_barrier);
+	synchronize_rcu_expedited();
 
 	return old_map;
 }
@@ -2123,6 +2136,7 @@ static struct dm_table *__unbind(struct
 	dm_table_event_callback(map, NULL, NULL);
 	rcu_assign_pointer(md->map, NULL);
 	synchronize_srcu(&md->io_barrier);
+	synchronize_rcu_expedited();
 
 	return map;
 }
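
Usage note (illustration only, not part of the patch): the contract is the
usual RCU one. Everything between the get and the put runs inside an RCU
read-side critical section, so it must not block, and the table pointer
must not be used after the put. A minimal sketch of a caller follows;
example_query and lookup_that_never_sleeps are hypothetical names invented
for this note:

	static int example_query(struct mapped_device *md)
	{
		int r = 0;
		/* Enter the RCU read-side critical section. */
		struct dm_table *map = dm_get_live_table_fast(md);

		/* map may be NULL if no table is bound; check before use. */
		if (map)
			r = lookup_that_never_sleeps(map);	/* hypothetical, must not block */

		/* Leave the critical section; map is no longer valid here. */
		dm_put_live_table_fast(md);

		return r;
	}

Callers that may sleep while holding the table must keep using the
SRCU-based dm_get_live_table/dm_put_live_table pair instead.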