From 455ddf1252e691d1eaa701de94e1421959a1aac9 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Mon, 5 Aug 2013 14:04:14 -0400
Subject: [PATCH] dm stats: factor out dm_stat_initialize_temporary_percpu_totals

Factor out dm_stat_initialize_temporary_percpu_totals() and fix
@stats_clear in the process: dm_stat_clear() now takes an
init_tmp_percpu_totals flag and aggregates the per-CPU counters itself
when clearing, rather than relying on totals left over from a previous
@stats_print.
---
 drivers/md/dm-stats.c |   57 ++++++++++++++++++++++++++++--------------------
 1 files changed, 33 insertions(+), 24 deletions(-)

diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 2aaa18a..2c30f78 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -352,7 +352,32 @@ void dm_stats_bio(struct dm_stats *st,
 	rcu_read_unlock();
 }
 
-static void dm_stat_clear(struct dm_stat *m)
+static void dm_stat_initialize_temporary_percpu_totals(struct dm_stat_shared *s,
+							struct dm_stat *m, size_t x)
+{
+	int cpu;
+	struct dm_stat_percpu *p;
+
+	local_irq_disable();
+	p = &m->stat_percpu[smp_processor_id()][x];
+	dm_stat_round(s, p);
+	local_irq_enable();
+
+	memset(&s->tmp, 0, sizeof(s->tmp));
+	for_each_possible_cpu(cpu) {
+		p = &m->stat_percpu[cpu][x];
+		s->tmp.sectors[0] += p->sectors[0];
+		s->tmp.sectors[1] += p->sectors[1];
+		s->tmp.ios[0] += p->ios[0];
+		s->tmp.ios[1] += p->ios[1];
+		s->tmp.ticks[0] += p->ticks[0];
+		s->tmp.ticks[1] += p->ticks[1];
+		s->tmp.io_ticks += p->io_ticks;
+		s->tmp.time_in_queue += p->time_in_queue;
+	}
+}
+
+static void dm_stat_clear(struct dm_stat *m, bool init_tmp_percpu_totals)
 {
 	size_t x;
 	struct dm_stat_shared *s;
@@ -360,6 +385,8 @@ static void dm_stat_clear(struct dm_stat *m)
 
 	for (x = 0; x < m->n_entries; x++) {
 		s = &m->stat_shared[x];
+		if (init_tmp_percpu_totals)
+			dm_stat_initialize_temporary_percpu_totals(s, m, x);
 		local_irq_disable();
 		p = &m->stat_percpu[smp_processor_id()][x];
 		p->sectors[0] -= s->tmp.sectors[0];
@@ -382,7 +409,7 @@ static int dm_stats_clear(struct dm_stats *st, int id)
 	if (!m)
 		return -ENOENT;
 
-	dm_stat_clear(m);
+	dm_stat_clear(m, true);
 
 	return 1;
 }
@@ -394,6 +421,7 @@ static int dm_stats_print(struct dm_stats *st, int id, bool clear,
 	struct dm_stat *m;
 	size_t x;
 	sector_t start, end;
+	struct dm_stat_shared *s;
 
 	m = dm_stats_find(st, id);
 	if (!m)
@@ -402,31 +430,12 @@
 	start = m->start;
 
 	for (x = 0; x < m->n_entries; x++, start = end) {
-		int cpu;
-		struct dm_stat_shared *s = &m->stat_shared[x];
-		struct dm_stat_percpu *p;
-
+		s = &m->stat_shared[x];
 		end = start + m->step;
 		if (unlikely(end > m->end))
 			end = m->end;
 
-		local_irq_disable();
-		p = &m->stat_percpu[smp_processor_id()][x];
-		dm_stat_round(s, p);
-		local_irq_enable();
-
-		memset(&s->tmp, 0, sizeof(s->tmp));
-		for_each_possible_cpu(cpu) {
-			p = &m->stat_percpu[cpu][x];
-			s->tmp.sectors[0] += p->sectors[0];
-			s->tmp.sectors[1] += p->sectors[1];
-			s->tmp.ios[0] += p->ios[0];
-			s->tmp.ios[1] += p->ios[1];
-			s->tmp.ticks[0] += p->ticks[0];
-			s->tmp.ticks[1] += p->ticks[1];
-			s->tmp.io_ticks += p->io_ticks;
-			s->tmp.time_in_queue += p->time_in_queue;
-		}
+		dm_stat_initialize_temporary_percpu_totals(s, m, x);
 
 		DMEMIT("%llu-%llu %lu %u %lu %lu %lu %u %lu %lu %d %lu %lu\n",
 		       (unsigned long long)start,
@@ -448,7 +457,7 @@ static int dm_stats_print(struct dm_stats *st, int id, bool clear,
 	}
 
 	if (clear)
-		dm_stat_clear(m);
+		dm_stat_clear(m, false);
 
 buffer_overflow:
 	mutex_unlock(&st->mutex);
-- 
1.7.1