From e043b565ce5629e62de5fd75154e1c5217a4bd9c Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 1 Oct 2010 15:11:28 +0200
Subject: [PATCH] block: reduce locking frequency in queue_bio()

Signed-off-by: Jens Axboe
---
 block/blk-core.c | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

Index: linux-rhel6/block/blk-core.c
===================================================================
--- linux-rhel6.orig/block/blk-core.c
+++ linux-rhel6/block/blk-core.c
@@ -743,8 +743,6 @@ static void freed_request(struct request
 
 /*
  * Get a free request, queue_lock must be held.
- * Returns NULL on failure, with queue_lock held.
- * Returns !NULL on success, with queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
@@ -753,6 +751,7 @@ static struct request *get_request(struc
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const bool drop_lock = (gfp_mask & __GFP_WAIT) != 0;
 	int may_queue, priv;
 
 	may_queue = elv_may_queue(q, rw_flags);
@@ -803,7 +802,9 @@ static struct request *get_request(struc
 
 	if (blk_queue_io_stat(q))
 		rw_flags |= REQ_IO_STAT;
-	spin_unlock_irq(q->queue_lock);
+
+	if (drop_lock)
+		spin_unlock_irq(q->queue_lock);
 
 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
@@ -814,7 +815,9 @@ static struct request *get_request(struc
 		 * Allocating task should really be put onto the front of the
 		 * wait queue, but this is pretty rare.
 		 */
-		spin_lock_irq(q->queue_lock);
+		if (drop_lock)
+			spin_lock_irq(q->queue_lock);
+
 		freed_request(q, is_sync, priv);
 
 		/*
@@ -841,6 +844,9 @@ rq_starved:
 		ioc->nr_batch_requests--;
 
 	trace_block_getrq(q, bio, rw_flags & 1);
+
+	if (drop_lock)
+		spin_lock_irq(q->queue_lock);
 out:
 	return rq;
 }
@@ -849,7 +855,7 @@ out:
  * No available requests for this queue, unplug the device and wait for some
  * requests to become available.
  *
- * Called with q->queue_lock held, and returns with it unlocked.
+ * Called with q->queue_lock held.
  */
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 					struct bio *bio)
@@ -857,7 +863,7 @@ static struct request *get_request_wait(
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	struct request *rq;
 
-	rq = get_request(q, rw_flags, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_ATOMIC);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct io_context *ioc;
@@ -897,15 +903,13 @@ struct request *blk_get_request(struct r
 	BUG_ON(rw != READ && rw != WRITE);
 
 	spin_lock_irq(q->queue_lock);
-	if (gfp_mask & __GFP_WAIT) {
+
+	if (gfp_mask & __GFP_WAIT)
 		rq = get_request_wait(q, rw, NULL);
-	} else {
+	else
 		rq = get_request(q, rw, NULL, gfp_mask);
-		if (!rq)
-			spin_unlock_irq(q->queue_lock);
-	}
-	/* q->queue_lock is unlocked at this point */
+
+	spin_unlock_irq(q->queue_lock);
 	return rq;
 }
 EXPORT_SYMBOL(blk_get_request);
@@ -1310,8 +1314,7 @@ get_rq:
 		rw_flags |= REQ_SYNC;
 
 	/*
-	 * Grab a free request. This is might sleep but can not fail.
-	 * Returns with the queue unlocked.
+	 * Grab a free request.
 	 */
 	req = get_request_wait(q, rw_flags, bio);
 
@@ -1323,7 +1326,6 @@ get_rq:
 	 */
 	init_request_from_bio(req, bio);
 
-	spin_lock_irq(q->queue_lock);
 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
 	    bio_flagged(bio, BIO_CPU_AFFINE))
 		req->cpu = blk_cpu_to_group(smp_processor_id());
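
The pattern the patch introduces is that get_request() now keeps lock symmetry with its
caller: q->queue_lock is only dropped around the allocation when the gfp mask allows
sleeping, and it is reacquired before returning, so blk_get_request() and the bio
submission path can take and release the lock unconditionally. Below is a minimal
userspace sketch of that pattern, using a pthread mutex in place of the queue spinlock
and malloc() in place of blk_alloc_request(); the names here (struct pool, pool_get,
may_sleep) are illustrative only, not the kernel API.

/* Build with: cc -pthread pool.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t lock;
	int free_slots;		/* bookkeeping protected by ->lock */
};

/* Caller must hold pool->lock; the lock is held again on return. */
static int *pool_get(struct pool *p, bool may_sleep)
{
	int *obj;

	if (p->free_slots == 0)
		return NULL;
	p->free_slots--;	/* reserve a slot while still under the lock */

	/*
	 * The allocation itself may block, so drop the lock around it, but
	 * only when the caller said it can sleep (the __GFP_WAIT analogue);
	 * atomic callers keep the lock held the whole time.
	 */
	if (may_sleep)
		pthread_mutex_unlock(&p->lock);

	obj = malloc(sizeof(*obj));	/* stand-in for blk_alloc_request() */

	if (may_sleep)
		pthread_mutex_lock(&p->lock);

	if (!obj)
		p->free_slots++;	/* undo the reservation on failure */
	return obj;
}

int main(void)
{
	struct pool p = { .lock = PTHREAD_MUTEX_INITIALIZER, .free_slots = 2 };
	int *obj;

	/* The caller locks once and unlocks once, success or failure. */
	pthread_mutex_lock(&p.lock);
	obj = pool_get(&p, true);
	pthread_mutex_unlock(&p.lock);

	printf("got %p, %d slots left\n", (void *)obj, p.free_slots);
	free(obj);
	return 0;
}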