dm-buffered: fix a race between async memcpy and __process_any_flush

There is a race condition between async memcpy and __process_any_flush.
When __process_any_flush is called, async memcpy operations may still be
in progress, and in that case __process_any_flush does not flush the
dirty buffers. We must call __process_any_flush only after all async
memcpy operations have completed, just before bio_endio().

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm-buffered-target.c | 67 ++++++++++++++++++++--------------------
 1 file changed, 35 insertions(+), 32 deletions(-)

Index: linux-2.6/drivers/md/dm-buffered-target.c
===================================================================
--- linux-2.6.orig/drivers/md/dm-buffered-target.c
+++ linux-2.6/drivers/md/dm-buffered-target.c
@@ -91,6 +91,38 @@ static blk_status_t _buffered_flush(stru
 	return errno_to_blk_status(dm_bufio_write_dirty_buffers(bc->bufio));
 }
 
+/* Check for and process any buffer flush requests. */
+static void __process_any_flush(struct buffered_c *bc, struct bio *bio)
+{
+	bool flush = false;
+	blk_status_t r;
+
+	if (bio_op(bio) == REQ_OP_WRITE) {
+		if (bio->bi_opf & REQ_FUA) {
+			atomic_inc(&bc->stats[S_FUA]);
+			flush = true;
+		} else if (bc->sync_writes) {
+			atomic_inc(&bc->stats[S_SYNC_WRITES]);
+			flush = true;
+		}
+	}
+
+	if (flush) {
+		r = _buffered_flush(bc);
+		if (r && !bio->bi_status)
+			bio->bi_status = r;
+	}
+}
+
+static void _dec_bio(struct buffered_c *bc, struct bio *bio)
+{
+	struct bio_c *bio_c = dm_per_bio_data(bio, sizeof(*bio_c));
+	if (atomic_dec_and_test(&bio_c->memcpy_in_progress)) {
+		__process_any_flush(bc, bio);
+		bio_endio(bio);
+	}
+}
+
 static void _complete_buffer(struct buffered_c *bc, struct bio *bio, struct dm_buffer *bp,
 			     unsigned buffer_offset, unsigned len)
 {
@@ -105,15 +137,13 @@ static void _complete_memcpy_work(struct
 {
 	struct async_request *as = container_of(ws, struct async_request, work);
 	struct bio *bio = as->bio;
-	struct bio_c *bio_c = dm_per_bio_data(bio, sizeof(*bio_c));
 	struct buffered_c *bc = as->bc;
 
 	_complete_buffer(bc, bio, as->bp, as->buffer_offset, as->len);
 
 	mempool_free(as, &bc->async_request_pool);
 
-	if (atomic_dec_and_test(&bio_c->memcpy_in_progress))
-		bio_endio(bio);
+	_dec_bio(bc, bio);
 }
 
 static void _complete_memcpy(void *context)
@@ -238,31 +268,6 @@ static void _io(struct buffered_c *bc, s
 	}
 }
 
-/* Check for and process any buffer flush requests. */
-static blk_status_t __process_any_flush(struct buffered_c *bc, struct bio *bio)
-{
-	bool flush = false;
-	blk_status_t r = BLK_STS_OK;
-
-	if (bio_op(bio) == REQ_OP_WRITE) {
-		if (bio->bi_opf & REQ_FUA) {
-			atomic_inc(&bc->stats[S_FUA]);
-			flush = true;
-		} else if (bc->sync_writes) {
-			atomic_inc(&bc->stats[S_SYNC_WRITES]);
-			flush = true;
-		}
-	}
-
-	if (flush) {
-		r = _buffered_flush(bc);
-		if (r && !bio->bi_status)
-			bio->bi_status = r;
-	}
-
-	return r;
-}
-
 /*
  * Issue discards to a block range defined by @bio.
  *
@@ -368,8 +373,7 @@ static void __process_bio(struct buffere
 
 		/* Try processing any REQ_FUA, ... even in case there's a previous I/O error.*/
 		rio = bio->bi_status;
-		r = __process_any_flush(bc, bio);
-		if (unlikely(r || rio))
+		if (unlikely(rio))
 			goto err;
 		break;
 	case REQ_OP_DISCARD:
@@ -389,8 +393,7 @@ static void __process_bio(struct buffere
 	}
 
 err:
-	if (atomic_dec_and_test(&bio_c->memcpy_in_progress))
-		bio_endio(bio);
+	_dec_bio(bc, bio);
 }
 
 /* Process I/O on a bio prefetching buffers on a single @bio in a worker. */
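
For illustration only (this is not part of the patch): a minimal user-space
sketch of the completion-count pattern the fix relies on, written with C11
atomics and hypothetical names (struct request, flush_dirty,
complete_request). The point is that the flush runs only in whichever path
drops the in-progress count to zero, immediately before the request is
completed.

/*
 * Minimal sketch of the completion-count pattern, expressed with C11
 * atomics in user space. All names here are illustrative, not the
 * driver's own.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
	atomic_int copies_in_progress;	/* async memcpy operations still pending */
	bool needs_flush;		/* REQ_FUA-like semantics */
};

static void flush_dirty(void)
{
	/* Stand-in for flushing dirty buffers. */
	printf("flushing dirty buffers\n");
}

static void complete_request(struct request *rq)
{
	/* Stand-in for completing the I/O. */
	printf("request %p completed\n", (void *)rq);
}

/* Called once per finished async copy and once by the submitter. */
static void put_request(struct request *rq)
{
	/* Only the path that drops the count to zero flushes and completes. */
	if (atomic_fetch_sub(&rq->copies_in_progress, 1) == 1) {
		if (rq->needs_flush)
			flush_dirty();
		complete_request(rq);
	}
}

int main(void)
{
	struct request rq = { .needs_flush = true };

	atomic_init(&rq.copies_in_progress, 1);	/* submitter's reference */

	atomic_fetch_add(&rq.copies_in_progress, 1);	/* one async copy in flight */
	put_request(&rq);	/* submitter drops its reference: not last, no flush yet */
	put_request(&rq);	/* async copy finishes: flush, then complete */

	return 0;
}

In the driver itself the same role is played by bio_c->memcpy_in_progress,
__process_any_flush()/_buffered_flush() and bio_endio(), which is why the
flush has to happen in _dec_bio(), not earlier in __process_bio().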