From cbbb688a40503b8292acd2e1720eafa53636e466 Mon Sep 17 00:00:00 2001
From: Mike Snitzer <snitzer@redhat.com>
Date: Tue, 6 Aug 2013 15:44:32 -0400
Subject: [PATCH 19/19] dm stats: rename struct dm_stat variables from 'm' to 's'

---
 drivers/md/dm-stats.c |  212 ++++++++++++++++++++++++------------------------
 1 files changed, 106 insertions(+), 106 deletions(-)

diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index c996c37..401fb06 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -159,13 +159,13 @@ static void kvfree(void *ptr, size_t alloc_size)
 static void dm_stat_free(struct rcu_head *head)
 {
 	int cpu;
-	struct dm_stat *m = container_of(head, struct dm_stat, rcu_head);
+	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
 
-	kfree(m->program);
-	kfree(m->aux);
+	kfree(s->program);
+	kfree(s->aux);
 	for_each_possible_cpu(cpu)
-		kvfree(m->stat_percpu[cpu], m->percpu_alloc_size);
-	kvfree(m, m->shared_alloc_size);
+		kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
+	kvfree(s, s->shared_alloc_size);
 }
 
 static int dm_stat_in_flight(struct dm_stat_shared *shared)
@@ -193,22 +193,22 @@ void dm_stats_exit_device(struct dm_stats *stats)
 	size_t ni;
 
 	while (!list_empty(&stats->list)) {
-		struct dm_stat *m = container_of(stats->list.next, struct dm_stat, list_entry);
-		list_del(&m->list_entry);
-		for (ni = 0; ni < m->n_entries; ni++) {
-			struct dm_stat_shared *shared = &m->stat_shared[ni];
+		struct dm_stat *s = container_of(stats->list.next, struct dm_stat, list_entry);
+		list_del(&s->list_entry);
+		for (ni = 0; ni < s->n_entries; ni++) {
+			struct dm_stat_shared *shared = &s->stat_shared[ni];
 			if (WARN_ON(dm_stat_in_flight(shared))) {
 				DMERR("leaked in-flight counter at index "
 				      "%lu (start %llu, end %llu, step %llu): reads %d, writes %d\n",
 				      (unsigned long)ni,
-				      (unsigned long long)m->start,
-				      (unsigned long long)m->end,
-				      (unsigned long long)m->step,
+				      (unsigned long long)s->start,
+				      (unsigned long long)s->end,
+				      (unsigned long long)s->step,
 				      atomic_read(&shared->in_flight[0]),
 				      atomic_read(&shared->in_flight[1]));
 			}
 		}
-		dm_stat_free(&m->rcu_head);
+		dm_stat_free(&s->rcu_head);
 	}
 	free_percpu(stats->last);
 }
@@ -220,7 +220,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 				 struct mapped_device *md)
 {
 	struct list_head *l;
-	struct dm_stat *m;
+	struct dm_stat *s;
 	sector_t n_entries;
 	size_t ni;
 	size_t shared_alloc_size;
@@ -250,31 +250,31 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 	if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
 		return -ENOMEM;
 
-	m = kvzalloc(shared_alloc_size, NUMA_NO_NODE);
-	if (!m)
+	s = kvzalloc(shared_alloc_size, NUMA_NO_NODE);
+	if (!s)
 		return -ENOMEM;
 
-	m->n_entries = n_entries;
-	m->start = start;
-	m->end = end;
-	m->step = step;
-	m->shared_alloc_size = shared_alloc_size;
-	m->percpu_alloc_size = percpu_alloc_size;
+	s->n_entries = n_entries;
+	s->start = start;
+	s->end = end;
+	s->step = step;
+	s->shared_alloc_size = shared_alloc_size;
+	s->percpu_alloc_size = percpu_alloc_size;
 
-	m->program = kstrdup(program, GFP_KERNEL);
-	if (!m->program) {
+	s->program = kstrdup(program, GFP_KERNEL);
+	if (!s->program) {
 		r = -ENOMEM;
 		goto free_ret;
 	}
 
-	m->aux = kstrdup(aux, GFP_KERNEL);
-	if (!m->aux) {
+	s->aux = kstrdup(aux, GFP_KERNEL);
+	if (!s->aux) {
 		r = -ENOMEM;
 		goto free_ret;
 	}
 
 	for (ni = 0; ni < n_entries; ni++) {
-		atomic_set(&m->stat_shared[ni].in_flight[0], 0);
-		atomic_set(&m->stat_shared[ni].in_flight[1], 0);
+		atomic_set(&s->stat_shared[ni].in_flight[0], 0);
+		atomic_set(&s->stat_shared[ni].in_flight[1], 0);
 	}
 	for_each_possible_cpu(cpu) {
@@ -283,7 +283,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 			r = -ENOMEM;
 			goto free_ret;
 		}
-		m->stat_percpu[cpu] = pc;
+		s->stat_percpu[cpu] = pc;
 	}
 
 	/*
@@ -296,23 +296,23 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 	suspend_callback(md);
 
 	mutex_lock(&stats->mutex);
-	m->id = 0;
+	s->id = 0;
 	list_for_each(l, &stats->list) {
-		struct dm_stat *s = container_of(l, struct dm_stat, list_entry);
-		if (s->id < m->id)
+		struct dm_stat *tmp_s = container_of(l, struct dm_stat, list_entry);
+		if (tmp_s->id < s->id)
 			BUG();
-		if (s->id > m->id)
+		if (tmp_s->id > s->id)
 			break;
-		if (m->id == INT_MAX) {
+		if (s->id == INT_MAX) {
 			mutex_unlock(&stats->mutex);
 			resume_callback(md);
 			r = -ENFILE;
 			goto free_ret;
 		}
-		m->id++;
+		s->id++;
 	}
-	ret_id = m->id;
-	list_add_tail_rcu(&m->list_entry, l);
+	ret_id = s->id;
+	list_add_tail_rcu(&s->list_entry, l);
 	mutex_unlock(&stats->mutex);
 
 	resume_callback(md);
@@ -320,19 +320,19 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 	return ret_id;
 
 free_ret:
-	dm_stat_free(&m->rcu_head);
+	dm_stat_free(&s->rcu_head);
 	return r;
 }
 
 static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
 {
-	struct dm_stat *m;
+	struct dm_stat *s;
 
-	list_for_each_entry(m, &stats->list, list_entry) {
-		if (m->id > id)
+	list_for_each_entry(s, &stats->list, list_entry) {
+		if (s->id > id)
 			break;
-		if (m->id == id)
-			return m;
+		if (s->id == id)
+			return s;
 	}
 
 	return NULL;
@@ -340,33 +340,33 @@ static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
 }
 
 static int dm_stats_delete(struct dm_stats *stats, int id)
 {
-	struct dm_stat *m;
+	struct dm_stat *s;
 	int cpu;
 
 	mutex_lock(&stats->mutex);
-	m = __dm_stats_find(stats, id);
-	if (!m) {
+	s = __dm_stats_find(stats, id);
+	if (!s) {
 		mutex_unlock(&stats->mutex);
 		return -ENOENT;
 	}
 
-	list_del_rcu(&m->list_entry);
+	list_del_rcu(&s->list_entry);
 	mutex_unlock(&stats->mutex);
 
 	/*
 	 * vfree can't be called from RCU callback
 	 */
 	for_each_possible_cpu(cpu)
-		if (is_vmalloc_addr(m->stat_percpu))
+		if (is_vmalloc_addr(s->stat_percpu))
 			goto do_sync_free;
-	if (is_vmalloc_addr(m)) {
+	if (is_vmalloc_addr(s)) {
do_sync_free:
 		synchronize_rcu_expedited();
-		dm_stat_free(&m->rcu_head);
+		dm_stat_free(&s->rcu_head);
 	} else {
 		dm_stat_need_rcu_barrier = 1;
-		call_rcu(&m->rcu_head, dm_stat_free);
+		call_rcu(&s->rcu_head, dm_stat_free);
 	}
 	return 0;
 }
@@ -374,18 +374,18 @@ do_sync_free:
 static int dm_stats_list(struct dm_stats *stats, const char *program,
 			 char *result, unsigned maxlen)
 {
-	struct dm_stat *m;
+	struct dm_stat *s;
 	unsigned sz = 0;
 
 	mutex_lock(&stats->mutex);
-	list_for_each_entry(m, &stats->list, list_entry) {
-		if (!program || !strcmp(program, m->program))
-			DMEMIT("%d: %llu-%llu %llu %s %s\n", m->id,
-			       (unsigned long long)m->start,
-			       (unsigned long long)m->end,
-			       (unsigned long long)m->step,
-			       m->program,
-			       m->aux);
+	list_for_each_entry(s, &stats->list, list_entry) {
+		if (!program || !strcmp(program, s->program))
+			DMEMIT("%d: %llu-%llu %llu %s %s\n", s->id,
+			       (unsigned long long)s->start,
+			       (unsigned long long)s->end,
+			       (unsigned long long)s->step,
+			       s->program,
+			       s->aux);
 	}
 	mutex_unlock(&stats->mutex);
 
@@ -417,12 +417,12 @@ static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
 	shared->stamp = now;
 }
 
-static void dm_stat_for_entry(struct dm_stat *m, size_t entry,
+static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
 			      unsigned long bi_rw, unsigned len,
 			      bool merged, bool end, unsigned long duration)
 {
 	unsigned long idx = bi_rw & REQ_WRITE;
-	struct dm_stat_shared *shared = &m->stat_shared[entry];
+	struct dm_stat_shared *shared = &s->stat_shared[entry];
 	struct dm_stat_percpu *p;
 
 	/*
@@ -439,7 +439,7 @@ static void dm_stat_for_entry(struct dm_stat *m, size_t entry,
 	 * part_stat_lock()/part_stat_unlock() have this race too.
 	 */
 	preempt_disable();
-	p = &m->stat_percpu[smp_processor_id()][entry];
+	p = &s->stat_percpu[smp_processor_id()][entry];
 
 	if (!end) {
 		dm_stat_round(shared, p);
@@ -456,7 +456,7 @@ static void dm_stat_for_entry(struct dm_stat *m, size_t entry,
 	preempt_enable();
 }
 
-static void __dm_stat_bio(struct dm_stat *m, unsigned long bi_rw,
+static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
 			  sector_t bi_sector, sector_t end_sector,
 			  bool end, unsigned long duration,
 			  struct dm_stats_aux *aux)
@@ -466,26 +466,26 @@ static void __dm_stat_bio(struct dm_stat *m, unsigned long bi_rw,
 	size_t entry;
 	unsigned fragment_len;
 
-	if (end_sector <= m->start || bi_sector >= m->end)
+	if (end_sector <= s->start || bi_sector >= s->end)
 		return;
-	if (unlikely(bi_sector < m->start)) {
+	if (unlikely(bi_sector < s->start)) {
 		rel_sector = 0;
-		todo = end_sector - m->start;
+		todo = end_sector - s->start;
 	} else {
-		rel_sector = bi_sector - m->start;
+		rel_sector = bi_sector - s->start;
 		todo = end_sector - bi_sector;
 	}
-	if (unlikely(end_sector > m->end))
-		todo -= end_sector - m->end;
+	if (unlikely(end_sector > s->end))
+		todo -= end_sector - s->end;
 
-	offset = sector_div(rel_sector, m->step);
+	offset = sector_div(rel_sector, s->step);
 	entry = rel_sector;
 	do {
-		BUG_ON(entry >= m->n_entries);
+		BUG_ON(entry >= s->n_entries);
 		fragment_len = todo;
-		if (fragment_len > m->step - offset)
-			fragment_len = m->step - offset;
-		dm_stat_for_entry(m, entry, bi_rw, fragment_len,
+		if (fragment_len > s->step - offset)
+			fragment_len = s->step - offset;
+		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
 				  aux->merged, end, duration);
 		todo -= fragment_len;
 		entry++;
@@ -497,7 +497,7 @@ void dm_stats_bio(struct dm_stats *stats,
 		  unsigned long bi_rw, sector_t bi_sector, unsigned bi_sectors,
 		  bool end, unsigned long duration, struct dm_stats_aux *aux)
 {
-	struct dm_stat *m;
+	struct dm_stat *s;
 	sector_t end_sector;
 	struct dm_stats_last_position *last;
 
@@ -521,26 +521,26 @@ void dm_stats_bio(struct dm_stats *stats,
 
 	rcu_read_lock();
 
-	list_for_each_entry_rcu(m, &stats->list, list_entry)
-		__dm_stat_bio(m, bi_rw, bi_sector, end_sector,
-			      end, duration, aux);
+	list_for_each_entry_rcu(s, &stats->list, list_entry)
+		__dm_stat_bio(s, bi_rw, bi_sector, end_sector,
+			      end, duration, aux);
 
 	rcu_read_unlock();
 }
 
 static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
-						   struct dm_stat *m, size_t x)
+						   struct dm_stat *s, size_t x)
 {
 	int cpu;
 	struct dm_stat_percpu *p;
 
 	local_irq_disable();
-	p = &m->stat_percpu[smp_processor_id()][x];
+	p = &s->stat_percpu[smp_processor_id()][x];
 	dm_stat_round(shared, p);
 	local_irq_enable();
 
 	memset(&shared->tmp, 0, sizeof(shared->tmp));
 	for_each_possible_cpu(cpu) {
-		p = &m->stat_percpu[cpu][x];
+		p = &s->stat_percpu[cpu][x];
 		shared->tmp.sectors[0] += p->sectors[0];
 		shared->tmp.sectors[1] += p->sectors[1];
 		shared->tmp.ios[0] += p->ios[0];
@@ -556,7 +556,7 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
 	}
 }
 
-static void __dm_stat_clear(struct dm_stat *m, size_t idx_start, size_t idx_end,
+static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
 			    bool init_tmp_percpu_totals)
 {
 	size_t x;
@@ -564,11 +564,11 @@ static void __dm_stat_clear(struct dm_stat *m, size_t idx_start, size_t idx_end,
 	struct dm_stat_percpu *p;
 
 	for (x = idx_start; x < idx_end; x++) {
-		shared = &m->stat_shared[x];
+		shared = &s->stat_shared[x];
 		if (init_tmp_percpu_totals)
-			__dm_stat_init_temporary_percpu_totals(shared, m, x);
+			__dm_stat_init_temporary_percpu_totals(shared, s, x);
 		local_irq_disable();
-		p = &m->stat_percpu[smp_processor_id()][x];
+		p = &s->stat_percpu[smp_processor_id()][x];
 		p->sectors[0] -= shared->tmp.sectors[0];
 		p->sectors[1] -= shared->tmp.sectors[1];
 		p->ios[0] -= shared->tmp.ios[0];
@@ -587,17 +587,17 @@ static void __dm_stat_clear(struct dm_stat *m, size_t idx_start, size_t idx_end,
 
 static int dm_stats_clear(struct dm_stats *stats, int id)
 {
-	struct dm_stat *m;
+	struct dm_stat *s;
 
 	mutex_lock(&stats->mutex);
 
-	m = __dm_stats_find(stats, id);
-	if (!m) {
+	s = __dm_stats_find(stats, id);
+	if (!s) {
 		mutex_unlock(&stats->mutex);
 		return -ENOENT;
 	}
 
-	__dm_stat_clear(m, 0, m->n_entries, true);
+	__dm_stat_clear(s, 0, s->n_entries, true);
 
 	mutex_unlock(&stats->mutex);
 
@@ -629,7 +629,7 @@ static int dm_stats_print(struct dm_stats *stats, int id,
 			  bool clear, char *result, unsigned maxlen)
 {
 	unsigned sz = 0;
-	struct dm_stat *m;
+	struct dm_stat *s;
 	size_t x;
 	sector_t start, end;
 	size_t idx_end;
@@ -637,29 +637,29 @@ static int dm_stats_print(struct dm_stats *stats, int id,
 
 	mutex_lock(&stats->mutex);
 
-	m = __dm_stats_find(stats, id);
-	if (!m) {
+	s = __dm_stats_find(stats, id);
+	if (!s) {
 		mutex_unlock(&stats->mutex);
 		return -ENOENT;
 	}
 
 	idx_end = idx_start + idx_len;
 	if (idx_end < idx_start ||
-	    idx_end > m->n_entries)
-		idx_end = m->n_entries;
+	    idx_end > s->n_entries)
+		idx_end = s->n_entries;
 
 	if (idx_start > idx_end)
 		idx_start = idx_end;
 
-	start = m->start + m->step * idx_start;
+	start = s->start + s->step * idx_start;
 
 	for (x = idx_start; x < idx_end; x++, start = end) {
-		shared = &m->stat_shared[x];
-		end = start + m->step;
-		if (unlikely(end > m->end))
-			end = m->end;
+		shared = &s->stat_shared[x];
+		end = start + s->step;
+		if (unlikely(end > s->end))
+			end = s->end;
 
-		__dm_stat_init_temporary_percpu_totals(shared, m, x);
+		__dm_stat_init_temporary_percpu_totals(shared, s, x);
 
 		DMEMIT("%llu-%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
 		       (unsigned long long)start,
@@ -683,7 +683,7 @@ static int dm_stats_print(struct dm_stats *stats, int id,
 	}
 
 	if (clear)
-		__dm_stat_clear(m, idx_start, idx_end, false);
+		__dm_stat_clear(s, idx_start, idx_end, false);
 
 buffer_overflow:
 	mutex_unlock(&stats->mutex);
@@ -693,12 +693,12 @@ buffer_overflow:
 
 static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux)
 {
-	struct dm_stat *m;
+	struct dm_stat *s;
 
 	mutex_lock(&stats->mutex);
-	m = __dm_stats_find(stats, id);
-	if (!m) {
+	s = __dm_stats_find(stats, id);
+	if (!s) {
 		mutex_unlock(&stats->mutex);
 		return -ENOENT;
 	}
@@ -709,8 +709,8 @@ static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux)
 		return -ENOMEM;
 	}
 
-	kfree(m->aux);
-	m->aux = aux;
+	kfree(s->aux);
+	s->aux = aux;
 
 	mutex_unlock(&stats->mutex);
 
-- 
1.7.1
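
P.S. A note on the one non-mechanical hunk above: the id-allocation walk in
dm_stats_create() keeps stats->list sorted by ascending id and hands out the
first unused id, with list_add_tail_rcu() then inserting the new stat before
the cursor so the list stays sorted. The list cursor must be a differently
named variable (tmp_s after the rename) or it would shadow the new stat's 's'
and the two comparisons would degenerate into self-compares. Below is a
minimal userspace sketch of the same first-fit scheme, with a sorted array
standing in for the kernel list; the names (stat_entry, alloc_first_free_id)
are illustrative only and do not appear in dm-stats.c:

#include <stdio.h>

/* Toy stand-in for struct dm_stat: only the field the id walk needs. */
struct stat_entry {
	int id;
};

/*
 * First-fit id allocation over entries sorted by ascending id:
 * start the candidate at 0 and bump it past every entry that already
 * owns it; the first gap (or the tail) wins. This mirrors the
 * list_for_each() walk in dm_stats_create() above, minus the BUG()
 * on a broken sort invariant and the INT_MAX overflow guard.
 */
static int alloc_first_free_id(const struct stat_entry *sorted, int n)
{
	int candidate = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (sorted[i].id > candidate)
			break;		/* gap found: candidate is unused */
		candidate++;		/* sorted[i].id == candidate: taken */
	}
	return candidate;
}

int main(void)
{
	struct stat_entry in_use[] = { { 0 }, { 1 }, { 3 }, { 4 } };

	/* Prints 2: the first id not present in the sorted array. */
	printf("%d\n", alloc_first_free_id(in_use, 4));
	return 0;
}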