dm optimize: make map_lock percpu

This patch changes map_lock into a percpu lock, similar to io_lock.
Most of the code for managing io_lock from the previous patch is reused.

Note: this patch triggers a false lockdep warning. The reason is that
there is no "write_lock_nested" function to annotate nested rw spinlocks.

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm.c |   26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

Index: linux-3.3-devel/drivers/md/dm.c
===================================================================
--- linux-3.3-devel.orig/drivers/md/dm.c	2012-04-18 05:06:54.000000000 +0200
+++ linux-3.3-devel/drivers/md/dm.c	2012-04-18 05:07:46.000000000 +0200
@@ -148,6 +148,9 @@ static void up_write_percpu_##name(struc
 declare_percpu_rw_lock(rw_semaphore, struct rw_semaphore, init_rwsem, down_read, up_read, down_write_nested, up_write)
 
+#define write_lock_nested(l, i)	write_lock(l)
+declare_percpu_rw_lock(rw_spinlock, rwlock_t, rwlock_init, read_lock, read_unlock, write_lock_nested, write_unlock)
+
 /*
  * Cookies are numeric values sent with CHANGE and REMOVE
  * uevents while resuming, removing or renaming the device.
@@ -242,7 +245,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 struct mapped_device {
 	struct percpu_rw_semaphore io_lock;
 	struct mutex suspend_lock;
-	rwlock_t map_lock;
+	struct percpu_rw_spinlock map_lock;
 	atomic_t holders;
 	atomic_t open_count;
 
@@ -660,13 +663,14 @@ static void queue_io(struct mapped_devic
 struct dm_table *dm_get_live_table(struct mapped_device *md)
 {
 	struct dm_table *t;
+	int lock_cpu;
 
 	BUG_ON(in_interrupt());
 
-	read_lock(&md->map_lock);
+	lock_cpu = down_read_percpu_rw_spinlock(&md->map_lock);
 	t = md->map;
 	if (t)
 		dm_table_get(t);
-	read_unlock(&md->map_lock);
+	up_read_percpu_rw_spinlock(&md->map_lock, lock_cpu);
 
 	return t;
 }
@@ -1957,11 +1961,14 @@ static struct mapped_device *alloc_dev(i
 	if (r < 0)
 		goto bad_semaphore;
 
+	r = init_percpu_rw_spinlock(&md->map_lock);
+	if (r < 0)
+		goto bad_spinlock;
+
 	md->type = DM_TYPE_NONE;
 	mutex_init(&md->suspend_lock);
 	mutex_init(&md->type_lock);
 	spin_lock_init(&md->deferred_lock);
-	rwlock_init(&md->map_lock);
 	atomic_set(&md->holders, 1);
 	atomic_set(&md->open_count, 0);
 	atomic_set(&md->event_nr, 0);
@@ -2024,6 +2031,8 @@ bad_thread:
 bad_disk:
	blk_cleanup_queue(md->queue);
 bad_queue:
+	free_percpu_rw_spinlock(&md->map_lock);
+bad_spinlock:
 	free_percpu_rw_semaphore(&md->io_lock);
 bad_semaphore:
 	free_minor(minor);
@@ -2051,6 +2060,7 @@ static void free_dev(struct mapped_devic
 		bioset_free(md->bs);
 	blk_integrity_unregister(md->disk);
 	del_gendisk(md->disk);
+	free_percpu_rw_spinlock(&md->map_lock);
 	free_percpu_rw_semaphore(&md->io_lock);
 	free_minor(minor);
 
@@ -2206,7 +2216,7 @@ static struct dm_table *__bind(struct ma
 
 	merge_is_optional = dm_table_merge_is_optional(t);
 
-	write_lock(&md->map_lock);
+	down_write_percpu_rw_spinlock(&md->map_lock);
 	old_map = md->map;
 	md->map = t;
 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
@@ -2216,7 +2226,7 @@ static struct dm_table *__bind(struct ma
 		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
 	else
 		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
-	write_unlock(&md->map_lock);
+	up_write_percpu_rw_spinlock(&md->map_lock);
 
 	return old_map;
 }
@@ -2232,9 +2242,9 @@ static struct dm_table *__unbind(struct
 		return NULL;
 
 	dm_table_event_callback(map, NULL, NULL);
-	write_lock(&md->map_lock);
+	down_write_percpu_rw_spinlock(&md->map_lock);
 	md->map = NULL;
-	write_unlock(&md->map_lock);
+	up_write_percpu_rw_spinlock(&md->map_lock);
 
 	return map;
 }
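
For reviewers: the declare_percpu_rw_lock() helpers used above come from
the previous patch in this series and are not visible in this diff. The
sketch below illustrates the pattern those helpers are assumed to expand
to for the rwlock_t case: each reader takes only its own CPU's rwlock and
remembers which CPU, so the unlock pairs up even if the task migrates,
while a writer takes every CPU's rwlock in turn. The struct and function
names mirror the ones used in the diff, but the bodies are an
illustrative reconstruction, not the actual macro output.

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

/* Sketch only -- the real definitions are generated by
 * declare_percpu_rw_lock() in the previous patch of this series. */
struct percpu_rw_spinlock {
	rwlock_t __percpu *lock;
};

static int init_percpu_rw_spinlock(struct percpu_rw_spinlock *p)
{
	int cpu;

	p->lock = alloc_percpu(rwlock_t);
	if (!p->lock)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		rwlock_init(per_cpu_ptr(p->lock, cpu));
	return 0;
}

static void free_percpu_rw_spinlock(struct percpu_rw_spinlock *p)
{
	free_percpu(p->lock);
}

/* Readers touch only their own CPU's lock.  The cpu number is returned
 * to the caller so the unlock hits the same lock even after migration
 * (read_lock disables preemption, so put_cpu() here is safe). */
static int down_read_percpu_rw_spinlock(struct percpu_rw_spinlock *p)
{
	int cpu = get_cpu();

	read_lock(per_cpu_ptr(p->lock, cpu));
	put_cpu();
	return cpu;
}

static void up_read_percpu_rw_spinlock(struct percpu_rw_spinlock *p, int cpu)
{
	read_unlock(per_cpu_ptr(p->lock, cpu));
}

/* A writer takes every CPU's lock.  Each acquisition after the first
 * nests inside the previous one; with write_lock_nested() defined as
 * plain write_lock(), lockdep sees recursive locking of one lock class,
 * which is the false positive mentioned in the description. */
static void down_write_percpu_rw_spinlock(struct percpu_rw_spinlock *p)
{
	int cpu;

	for_each_possible_cpu(cpu)
		write_lock_nested(per_cpu_ptr(p->lock, cpu), cpu);
}

static void up_write_percpu_rw_spinlock(struct percpu_rw_spinlock *p)
{
	int cpu;

	for_each_possible_cpu(cpu)
		write_unlock(per_cpu_ptr(p->lock, cpu));
}

This keeps the read side at one uncontended per-CPU cacheline, which is
what matters for dm_get_live_table() on the I/O path; the write side is
expensive, but table swaps in __bind()/__unbind() are rare by comparison.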