From: Heinz Mauelshagen
Subject: dm cache policy era: fix spinlock based era_counter increment

FIXME: what exactly was broken?
FIXME: fold into previous era patch

Signed-off-by: Heinz Mauelshagen
---
 drivers/md/dm-cache-policy-era.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

Index: linux/drivers/md/dm-cache-policy-era.c
===================================================================
--- linux.orig/drivers/md/dm-cache-policy-era.c
+++ linux/drivers/md/dm-cache-policy-era.c
@@ -43,7 +43,6 @@ struct era_policy {
 
 	era_t *cb_to_era;
 
-	spinlock_t era_counter_lock;
 	era_t era_counter;
 };
 
@@ -57,7 +56,6 @@ static struct era_policy *to_era_policy(
 static int incr_era_counter(struct era_policy *era, const char *curr_era_str)
 {
 	era_t curr_era_counter;
-	unsigned long flags;
 	int r;
 
 	/*
@@ -74,19 +72,17 @@ static int incr_era_counter(struct era_p
 	if (kstrtou32(curr_era_str, 10, &curr_era_counter))
 		return -EINVAL;
 
-	spin_lock_irqsave(&era->era_counter_lock, flags);
-
+	smp_rmb();
 	if (era->era_counter != curr_era_counter)
 		r = -ECANCELED;
 	else if (era->era_counter >= ERA_MAX_ERA)
 		r = -EOVERFLOW;
 	else {
 		era->era_counter++;
+		smp_wmb();
 		r = 0;
 	}
 
-	spin_unlock_irqrestore(&era->era_counter_lock, flags);
-
 	return r;
 }
 
@@ -231,6 +227,7 @@ static int era_map(struct dm_cache_polic
 	if (!r && (bio_data_dir(bio) == WRITE) && (result->op == POLICY_HIT)) {
 		cb_idx = from_cblock(result->cblock);
 		BUG_ON(cb_idx >= from_cblock(era->cache_size));
+		smp_rmb();
 #if DEBUG_ERA
 		DMDEBUG("assigning era %u to cblock %u, oblock %llu due to write hit.",
 			era->era_counter, result->cblock, oblock);
@@ -272,10 +269,12 @@ static int era_load_mapping(struct dm_ca
 	 * Make sure the era counter starts higher than the highest
 	 * persisted era.
 	 */
+	smp_rmb();
 	if (recovered_era >= era->era_counter) {
 		era->era_counter = recovered_era;
 		if (era->era_counter < ERA_MAX_ERA)
 			era->era_counter++;
+		smp_wmb();
 #if DEBUG_ERA
 		DMDEBUG("set era_counter to %u.", era->era_counter);
 #endif
@@ -300,6 +299,7 @@ static void era_force_mapping(struct dm_
 	mutex_lock(&era->lock);
 
 	if (!policy_lookup(p->child, old_oblock, &cblock)) {
+		smp_rmb();
 #if DEBUG_ERA
 		DMDEBUG("assigning era %u to cblock %u, oblock %llu "
 			"(old_oblock %llu) due to force_mapping.",
@@ -340,6 +340,8 @@ static int era_emit_config_values(struct
 {
 	struct era_policy *era = to_era_policy(p);
 	ssize_t sz = 0;
+
+	smp_rmb();
 	DMEMIT("era_counter %u ", era->era_counter);
 	return policy_emit_config_values(p->child, result + sz, maxlen - sz);
 }
@@ -369,7 +371,6 @@ static struct dm_cache_policy *era_creat
 	init_policy_functions(era);
 	era->cache_size = cache_size;
 	mutex_init(&era->lock);
-	spin_lock_init(&era->era_counter_lock);
 
 	era->cb_to_era = kzalloc(from_cblock(era->cache_size) * sizeof(*(era->cb_to_era)),
 				 GFP_KERNEL);
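
For readers not steeped in the kernel memory model, here is a rough, self-contained
userspace sketch of the barrier discipline the patch adopts in place of the spinlock.
It is an illustration only, not code from dm-cache-policy-era.c: C11 fences stand in
for smp_wmb()/smp_rmb(), relaxed atomic accesses stand in for the driver's plain
era_t loads/stores, and the ERA_MAX_ERA value, function names and error codes are
assumptions made for the example.

/*
 * Sketch of the publish/consume pattern used once the per-counter
 * spinlock is gone: the writer updates the counter and then issues a
 * write barrier; readers issue a read barrier before sampling it.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ERA_MAX_ERA UINT32_MAX		/* assumed limit for the sketch */

static atomic_uint era_counter = 1;	/* stands in for era->era_counter */

/* Writer side: roughly what incr_era_counter() does after the patch. */
static int incr_era_counter_sketch(unsigned expected)
{
	unsigned cur = atomic_load_explicit(&era_counter, memory_order_relaxed);

	if (cur != expected)
		return -1;		/* driver returns -ECANCELED */
	if (cur >= ERA_MAX_ERA)
		return -2;		/* driver returns -EOVERFLOW */

	atomic_store_explicit(&era_counter, cur + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* placed like smp_wmb() */
	return 0;
}

/* Reader side: roughly what era_map() / era_emit_config_values() do. */
static unsigned sample_era_counter(void)
{
	atomic_thread_fence(memory_order_acquire);	/* placed like smp_rmb() */
	return atomic_load_explicit(&era_counter, memory_order_relaxed);
}

int main(void)
{
	if (incr_era_counter_sketch(1) == 0)
		printf("era is now %u\n", sample_era_counter());
	return 0;
}

Note that barriers only order memory accesses; they do not make the read-modify-write
in incr_era_counter() atomic, so concurrent callers still need some external
serialization (whether the existing era->lock mutex already provides that is not
visible from these hunks).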