From: Sasha Levin

Switch dm to use the new hashtable implementation. This reduces the amount
of generic unrelated code in dm.

This patch depends on d9b482c ("hashtable: introduce a small and naive
hashtable") which was merged in v3.6.

Signed-off-by: Sasha Levin
---
FIXME Needs testing before pushing.  (Posted version didn't compile.)
---
 drivers/md/dm-snap.c                                      |   25 ++++--------
 drivers/md/persistent-data/dm-block-manager.c             |    1
 drivers/md/persistent-data/dm-persistent-data-internal.h  |   19 ---------
 drivers/md/persistent-data/dm-transaction-manager.c       |   30 ++++-----------
 4 files changed, 17 insertions(+), 58 deletions(-)

Index: linux/drivers/md/dm-snap.c
===================================================================
--- linux.orig/drivers/md/dm-snap.c
+++ linux/drivers/md/dm-snap.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include <linux/hashtable.h>
 
 #include "dm-exception-store.h"
@@ -34,9 +35,7 @@ static const char dm_snapshot_merge_targ
  */
 #define MIN_IOS 256
 
-#define DM_TRACKED_CHUNK_HASH_SIZE	16
-#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
-					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
+#define DM_TRACKED_CHUNK_HASH_BITS	4
 
 struct dm_exception_table {
 	uint32_t hash_mask;
@@ -79,7 +78,7 @@ struct dm_snapshot {
 	/* Chunks with outstanding reads */
 	spinlock_t tracked_chunk_lock;
-	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+	DECLARE_HASHTABLE(tracked_chunk_hash, DM_TRACKED_CHUNK_HASH_BITS);
 
 	/* The on disk metadata handler */
 	struct dm_exception_store *store;
@@ -212,8 +211,7 @@ static void track_chunk(struct dm_snapsh
 	c->chunk = chunk;
 
 	spin_lock_irq(&s->tracked_chunk_lock);
-	hlist_add_head(&c->node,
-		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
+	hash_add(s->tracked_chunk_hash, &c->node, chunk);
 	spin_unlock_irq(&s->tracked_chunk_lock);
 }
@@ -223,7 +221,7 @@ static void stop_tracking_chunk(struct d
 	unsigned long flags;
 
 	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
-	hlist_del(&c->node);
+	hash_del(&c->node);
 	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
 }
@@ -235,8 +233,7 @@ static int __chunk_is_tracked(struct dm_
 
 	spin_lock_irq(&s->tracked_chunk_lock);
 
-	hlist_for_each_entry(c, hn,
-	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
+	hash_for_each_possible(s->tracked_chunk_hash, c, hn, node, chunk) {
 		if (c->chunk == chunk) {
 			found = 1;
 			break;
@@ -1038,7 +1035,6 @@ static void stop_merge(struct dm_snapsho
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct dm_snapshot *s;
-	int i;
 	int r = -EINVAL;
 	char *origin_path, *cow_path;
 	unsigned args_used, num_flush_bios = 1;
@@ -1125,8 +1121,7 @@ static int snapshot_ctr(struct dm_target
 		goto bad_pending_pool;
 	}
 
-	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
-		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
+	hash_init(s->tracked_chunk_hash);
 
 	spin_lock_init(&s->tracked_chunk_lock);
@@ -1248,9 +1243,6 @@ static void __handover_exceptions(struct
 
 static void snapshot_dtr(struct dm_target *ti)
 {
-#ifdef CONFIG_DM_DEBUG
-	int i;
-#endif
 	struct dm_snapshot *s = ti->private;
 	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
@@ -1281,8 +1273,7 @@ static void snapshot_dtr(struct dm_targe
 	smp_mb();
 
 #ifdef CONFIG_DM_DEBUG
-	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
-		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
+	BUG_ON(!hash_empty(s->tracked_chunk_hash));
 #endif
 
 	__free_exceptions(s);
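[Not part of the patch: for reviewers who have not seen <linux/hashtable.h> yet, the
tracked-chunk conversion above boils down to the pattern sketched below. The names
(struct tracked, track(), is_tracked(), untrack(), TRACKED_HASH_BITS) are made up for
illustration, and the sketch assumes the hashtable API as introduced by d9b482c, where
hash_for_each_possible() still takes an explicit struct hlist_node iterator:

/* Illustrative sketch only; hypothetical names, not code from dm-snap.c. */
#include <linux/hashtable.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define TRACKED_HASH_BITS 4	/* 1 << 4 = 16 buckets, like the old array */

struct tracked {
	unsigned long chunk;		/* lookup key */
	struct hlist_node node;		/* hashtable linkage */
};

/* DEFINE_HASHTABLE() is statically initialized, so no hash_init() is needed here. */
static DEFINE_HASHTABLE(tracked_hash, TRACKED_HASH_BITS);
static DEFINE_SPINLOCK(tracked_lock);

static void track(struct tracked *t, unsigned long chunk)
{
	t->chunk = chunk;

	spin_lock_irq(&tracked_lock);
	hash_add(tracked_hash, &t->node, chunk);	/* the key is hashed internally */
	spin_unlock_irq(&tracked_lock);
}

static bool is_tracked(unsigned long chunk)
{
	struct tracked *t;
	struct hlist_node *hn;
	bool found = false;

	spin_lock_irq(&tracked_lock);
	/* Walks only the bucket that 'chunk' hashes to. */
	hash_for_each_possible(tracked_hash, t, hn, node, chunk)
		if (t->chunk == chunk) {
			found = true;
			break;
		}
	spin_unlock_irq(&tracked_lock);

	return found;
}

static void untrack(struct tracked *t)
{
	spin_lock_irq(&tracked_lock);
	hash_del(&t->node);		/* unlinks the node from its bucket */
	spin_unlock_irq(&tracked_lock);
}

Note that DECLARE_HASHTABLE() inside a structure, as used for struct dm_snapshot,
only declares the bucket array, which is why snapshot_ctr() still calls hash_init()
at runtime.]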
Index: linux/drivers/md/persistent-data/dm-block-manager.c
===================================================================
--- linux.orig/drivers/md/persistent-data/dm-block-manager.c
+++ linux/drivers/md/persistent-data/dm-block-manager.c
@@ -4,7 +4,6 @@
  * This file is released under the GPL.
  */
 #include "dm-block-manager.h"
-#include "dm-persistent-data-internal.h"
 #include "../dm-bufio.h"
 
 #include
Index: linux/drivers/md/persistent-data/dm-persistent-data-internal.h
===================================================================
--- linux.orig/drivers/md/persistent-data/dm-persistent-data-internal.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (C) 2011 Red Hat, Inc.
- *
- * This file is released under the GPL.
- */
-
-#ifndef _DM_PERSISTENT_DATA_INTERNAL_H
-#define _DM_PERSISTENT_DATA_INTERNAL_H
-
-#include "dm-block-manager.h"
-
-static inline unsigned dm_hash_block(dm_block_t b, unsigned hash_mask)
-{
-	const unsigned BIG_PRIME = 4294967291UL;
-
-	return (((unsigned) b) * BIG_PRIME) & hash_mask;
-}
-
-#endif /* _PERSISTENT_DATA_INTERNAL_H */
Index: linux/drivers/md/persistent-data/dm-transaction-manager.c
===================================================================
--- linux.orig/drivers/md/persistent-data/dm-transaction-manager.c
+++ linux/drivers/md/persistent-data/dm-transaction-manager.c
@@ -7,11 +7,11 @@
 #include "dm-space-map.h"
 #include "dm-space-map-disk.h"
 #include "dm-space-map-metadata.h"
-#include "dm-persistent-data-internal.h"
 
 #include
 #include
 #include
+#include <linux/hashtable.h>
 
 #define DM_MSG_PREFIX "transaction manager"
@@ -25,8 +25,7 @@ struct shadow_info {
 /*
  * It would be nice if we scaled with the size of transaction.
  */
-#define HASH_SIZE 256
-#define HASH_MASK (HASH_SIZE - 1)
+#define DM_HASH_BITS 8
 
 struct dm_transaction_manager {
 	int is_clone;
@@ -36,7 +35,7 @@ struct dm_transaction_manager {
 	struct dm_space_map *sm;
 
 	spinlock_t lock;
-	struct hlist_head buckets[HASH_SIZE];
+	DECLARE_HASHTABLE(hash, DM_HASH_BITS);
 };
 
 /*----------------------------------------------------------------*/
@@ -44,12 +43,11 @@ struct dm_transaction_manager {
 static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 {
 	int r = 0;
-	unsigned bucket = dm_hash_block(b, HASH_MASK);
 	struct shadow_info *si;
 	struct hlist_node *n;
 
 	spin_lock(&tm->lock);
-	hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+	hash_for_each_possible(tm->hash, si, n, hlist, b)
 		if (si->where == b) {
 			r = 1;
 			break;
@@ -65,15 +63,13 @@ static int is_shadow(struct dm_transacti
  */
 static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 {
-	unsigned bucket;
 	struct shadow_info *si;
 
 	si = kmalloc(sizeof(*si), GFP_NOIO);
 	if (si) {
 		si->where = b;
-		bucket = dm_hash_block(b, HASH_MASK);
 		spin_lock(&tm->lock);
-		hlist_add_head(&si->hlist, tm->buckets + bucket);
+		hash_add(tm->hash, &si->hlist, b);
 		spin_unlock(&tm->lock);
 	}
 }
@@ -82,18 +78,12 @@ static void wipe_shadow_table(struct dm_
 {
 	struct shadow_info *si;
 	struct hlist_node *n, *tmp;
-	struct hlist_head *bucket;
 	int i;
 
 	spin_lock(&tm->lock);
-	for (i = 0; i < HASH_SIZE; i++) {
-		bucket = tm->buckets + i;
-		hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
-			kfree(si);
-
-		INIT_HLIST_HEAD(bucket);
-	}
-
+	hash_for_each_safe(tm->hash, i, n, tmp, si, hlist)
+		kfree(si);
+	hash_init(tm->hash);
 	spin_unlock(&tm->lock);
 }
@@ -102,7 +92,6 @@ static void wipe_shadow_table(struct dm_
 static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
						   struct dm_space_map *sm)
 {
-	int i;
 	struct dm_transaction_manager *tm;
 
	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
@@ -115,8 +104,7 @@ static struct dm_transaction_manager *dm
 	tm->sm = sm;
 
 	spin_lock_init(&tm->lock);
-	for (i = 0; i < HASH_SIZE; i++)
-		INIT_HLIST_HEAD(tm->buckets + i);
+	hash_init(tm->hash);
 
 	return tm;
 }
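[Not part of the patch: in the transaction-manager conversion above, DM_HASH_BITS 8 keeps
the bucket count at 256 (the table is sized as 1 << bits), matching the old HASH_SIZE, and
bucket selection now happens inside hash_add()/hash_for_each_possible() via hash_min(),
which is what makes the removed dm_hash_block() helper unnecessary. A standalone sketch of
the wipe pattern used by wipe_shadow_table(), again with hypothetical names:

/* Illustrative sketch only; hypothetical names, not code from dm-transaction-manager.c. */
#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define SHADOW_HASH_BITS 8	/* 1 << 8 = 256 buckets */

struct shadow {
	unsigned long where;		/* lookup key */
	struct hlist_node hlist;	/* hashtable linkage */
};

static DEFINE_HASHTABLE(shadow_hash, SHADOW_HASH_BITS);
static DEFINE_SPINLOCK(shadow_lock);

static void wipe_shadows(void)
{
	struct shadow *s;
	struct hlist_node *n, *tmp;
	int bkt;

	spin_lock(&shadow_lock);
	/* The _safe variant is used because entries are freed while walking every bucket. */
	hash_for_each_safe(shadow_hash, bkt, n, tmp, s, hlist)
		kfree(s);
	/* Reset all bucket heads so the table can be reused. */
	hash_init(shadow_hash);
	spin_unlock(&shadow_lock);
}
]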