From d644c7459f5621b89cf39ba2ac534d9652ef7eda Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 3 Jun 2020 20:14:36 -0400 Subject: [PATCH] dm rq: reintroduce pending atomic counters to validate blk-mq inflight Reverts portions of these 3 commits to restore request-based pending IO accounting: dbd3bbd291a dm rq: leverage blk_mq_queue_busy() to check for outstanding IO 2adc5c559 dm rq: remove unused arguments from rq_completed() 6f75723190d dm: remove the pending IO accounting Then update md_in_flight() to validate DM's pending IO accounting matches blk-mq's idea of whether requests are inflight. WARN_ON() if they don't match. Signed-off-by: Mike Snitzer --- drivers/md/dm-core.h | 1 + drivers/md/dm-rq.c | 17 ++++++++++++----- drivers/md/dm.c | 12 +++++++++--- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index c4ef1fceead6..d7d79d9e71a5 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -65,6 +65,7 @@ struct mapped_device { */ struct work_struct work; wait_queue_head_t wait; + atomic_t pending[2]; spinlock_t deferred_lock; struct bio_list deferred; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 3f8577e2c13b..fde5a08e5f62 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -144,8 +144,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) * the md may be freed in dm_put() at the end of this function. * Or do dm_get() before calling this function and dm_put() later. 
*/ -static void rq_completed(struct mapped_device *md) +static void rq_completed(struct mapped_device *md, int rw) { + atomic_dec(&md->pending[rw]); + /* nudge anyone waiting on suspend queue */ if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); @@ -163,6 +165,7 @@ static void rq_completed(struct mapped_device *md) */ static void dm_end_request(struct request *clone, blk_status_t error) { + int rw = rq_data_dir(clone); struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; struct request *rq = tio->orig; @@ -172,7 +175,7 @@ static void dm_end_request(struct request *clone, blk_status_t error) rq_end_stats(md, rq); blk_mq_end_request(rq, error); - rq_completed(md); + rq_completed(md, rw); } static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs) @@ -196,6 +199,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ { struct mapped_device *md = tio->md; struct request *rq = tio->orig; + int rw = rq_data_dir(rq); unsigned long delay_ms = delay_requeue ? 
100 : 0; rq_end_stats(md, rq); @@ -205,7 +209,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ } dm_mq_delay_requeue_request(rq, delay_ms); - rq_completed(md); + rq_completed(md, rw); } static void dm_done(struct request *clone, blk_status_t error, bool mapped) @@ -263,13 +267,15 @@ static void dm_softirq_done(struct request *rq) bool mapped = true; struct dm_rq_target_io *tio = tio_from_request(rq); struct request *clone = tio->clone; + int rw; if (!clone) { struct mapped_device *md = tio->md; rq_end_stats(md, rq); + rw = rq_data_dir(rq); blk_mq_end_request(rq, tio->error); - rq_completed(md); + rq_completed(md, rw); return; } @@ -448,6 +454,7 @@ ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, static void dm_start_request(struct mapped_device *md, struct request *orig) { blk_mq_start_request(orig); + atomic_inc(&md->pending[rq_data_dir(orig)]); if (unlikely(dm_stats_used(&md->stats))) { struct dm_rq_target_io *tio = tio_from_request(orig); @@ -521,7 +528,7 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, if (map_request(tio) == DM_MAPIO_REQUEUE) { /* Undo dm_start_request() before requeuing */ rq_end_stats(md, rq); - rq_completed(md); + rq_completed(md, rq_data_dir(rq)); return BLK_STS_RESOURCE; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1fae647ef108..4152cd6f0e0c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -669,9 +669,13 @@ static bool md_in_flight_bios(struct mapped_device *md) static bool md_in_flight(struct mapped_device *md) { - if (queue_is_mq(md->queue)) - return blk_mq_queue_inflight(md->queue); - else + if (queue_is_mq(md->queue)) { + bool blk_mq_inflight = blk_mq_queue_inflight(md->queue); + bool dm_inflight = (atomic_read(&md->pending[READ]) + + atomic_read(&md->pending[WRITE]) != 0); + WARN_ON(blk_mq_inflight != dm_inflight); + return dm_inflight; + } else return md_in_flight_bios(md); } @@ -1992,6 +1996,8 @@ static struct mapped_device 
*alloc_dev(int minor) if (!md->disk) goto bad; + atomic_set(&md->pending[0], 0); + atomic_set(&md->pending[1], 0); init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); init_waitqueue_head(&md->eventq); -- 2.18.0