From 62f225f9d4e44f3c3bc3e06e54c01e43fef515f6 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 23 May 2013 13:10:40 +0200
Subject: [PATCH 04/18] blk-mq: new multi-queue block IO queueing mechanism

Linux currently has two models for block devices:

- The classic request_fn based approach, where drivers use struct
  request units for IO. The block layer provides various helper
  functionalities to let drivers share code, things like tag
  management, timeout handling, queueing, etc.

- The "stacked" approach, where a driver squeezes in between the
  block layer and IO submitter. Since this bypasses the IO stack,
  drivers generally have to manage everything themselves.

With drivers being written for new high IOPS devices, the classic
request_fn based driver doesn't work well enough. The design dates
back to when both SMP and high IOPS were rare. It has problems
scaling to bigger machines, and runs into issues even on smaller
machines once a single device does IOPS in the hundreds of
thousands.

The stacked approach is then most often selected as the model for
the driver. But this means that everybody has to re-invent
everything, and along with that we get all the problems again that
the shared approach solved.

This commit introduces blk-mq, block multi-queue support. The design
is centered around per-cpu queues for queueing IO, which then funnel
down into some number of hardware submission queues. We might have a
1:1 mapping between the two, or it might be an N:M mapping. That all
depends on what the hardware supports.

blk-mq provides various helper functions, which include:

- Scalable support for request tagging. Most devices need to be able
  to uniquely identify a request both in the driver and to the
  hardware. The tagging uses per-cpu caches for freed tags, to enable
  cache hot reuse.

- Timeout handling without tracking requests on a per-device basis.
  Basically, the driver should be able to get a notification if a
  request happens to fail.

- Optional support for non-1:1 mappings between issue and submission
  queues. blk-mq can redirect IO completions to the desired location.

- Support for per-request payloads. Drivers almost always need to
  associate a request structure with some driver-private command
  structure. Drivers can tell blk-mq this at init time, and then any
  request handed to the driver will have the required size of memory
  associated with it.

- Support for merging of IO, and plugging. The stacked model gets
  neither of these. Even for high IOPS devices, merging sequential IO
  reduces per-command overhead and thus increases bandwidth.

For now, this is provided as a potential third queueing model, with
the hope being that, as it matures, it can replace both the classic
and stacked models. That would get us back to having just one real
model for block devices, leaving the stacked approach to dm/md
devices (as it was originally intended).
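[Editor's sketch, not part of the patch] To make the registration flow
concrete, here is a minimal, hypothetical driver written against the
interface this patch introduces. The "ex_" names are invented;
blk_mq_init_queue() and the .alloc_hctx/.free_hctx hook names are
assumptions based on the exported single-queue helpers and the new
include/linux/blk-mq.h header (whose contents are not shown here). The
remaining names (struct blk_mq_reg, struct blk_mq_ops, queue_rq,
blk_mq_map_queue(), blk_mq_rq_to_pdu(), blk_mq_end_io(),
BLK_MQ_RQ_QUEUE_*, BLK_MQ_F_SHOULD_MERGE, rq_pdu) all appear in the
code added below.

/*
 * Hypothetical example (not part of this patch): a trivial driver that
 * registers one hardware queue and completes every request inline.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct ex_cmd {				/* driver-private per-request payload */
	int status;
};

static int ex_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct ex_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = 0;		/* "issue" the command ... */
	blk_mq_end_io(rq, 0);		/* ... and complete it immediately */
	return BLK_MQ_RQ_QUEUE_OK;	/* or _BUSY / _ERROR */
}

static struct blk_mq_ops ex_mq_ops = {
	.queue_rq	= ex_queue_rq,
	.map_queue	= blk_mq_map_queue,		/* default CPU -> hw queue map */
	.alloc_hctx	= blk_mq_alloc_single_hw_queue,	/* assumed hook names */
	.free_hctx	= blk_mq_free_single_hw_queue,
};

static struct blk_mq_reg ex_mq_reg = {
	.ops		= &ex_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.rq_pdu		= sizeof(struct ex_cmd),	/* per-request payload size */
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};

static struct request_queue *ex_init_queue(void *driver_data)
{
	/* assumed registration entry point from include/linux/blk-mq.h */
	return blk_mq_init_queue(&ex_mq_reg, driver_data);
}

Because requests (including the rq_pdu bytes behind them) are carved out
of preallocated, tag-indexed memory, ->queue_rq() never has to cope with
request allocation failures; on completion, blk_mq_end_io() either
finishes the request inline or redirects it to the submitting CPU via
IPI, depending on the ctx's ipi_redirect setting shown in the patch.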
Signed-off-by: Jens Axboe --- MQ-TODO | 19 + block/Makefile | 5 +- block/blk-core.c | 135 +++-- block/blk-mq-cpu.c | 93 ++++ block/blk-mq-cpumap.c | 100 ++++ block/blk-mq-sysfs.c | 384 ++++++++++++++ block/blk-mq-tag.c | 533 +++++++++++++++++++ block/blk-mq-tag.h | 21 + block/blk-mq.c | 1254 +++++++++++++++++++++++++++++++++++++++++++++ block/blk-mq.h | 48 ++ block/blk-sysfs.c | 10 + block/blk-timeout.c | 73 ++- block/blk.h | 17 + include/linux/bio.h | 2 + include/linux/blk-mq.h | 183 +++++++ include/linux/blk_types.h | 2 + include/linux/blkdev.h | 27 +- 17 files changed, 2820 insertions(+), 86 deletions(-) create mode 100644 MQ-TODO create mode 100644 block/blk-mq-cpu.c create mode 100644 block/blk-mq-cpumap.c create mode 100644 block/blk-mq-sysfs.c create mode 100644 block/blk-mq-tag.c create mode 100644 block/blk-mq-tag.h create mode 100644 block/blk-mq.c create mode 100644 block/blk-mq.h create mode 100644 include/linux/blk-mq.h diff --git a/MQ-TODO b/MQ-TODO new file mode 100644 index 0000000..0e76fec --- /dev/null +++ b/MQ-TODO @@ -0,0 +1,19 @@ +TODO list for multiqueue/new-queue. In no particular order. +----------------------------------------------------------- + +- Better switching scheme for deciding when to run + queues inline and when to punt to kblockd. + +- Implement proper scheme for not having lots of processes + or kblockds hammering on blk_mq_run_hw_queue() at the same + time. If someone else is already running the queue, we need + not do anything. (partially done) + +- Ensure that IO ordering is more sane: + - Let a process "stick" to a mapped queue while it + has IO in flight, instead of always dynamically + mapping it based on the current CPU. + +- Add token based mq aware IO scheduler + +/Jens diff --git a/block/Makefile b/block/Makefile index 39b76ba..21f4618 100644 --- a/block/Makefile +++ b/block/Makefile @@ -5,8 +5,9 @@ obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ - blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o \ - partition-generic.o partitions/ + blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \ + blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \ + genhd.o scsi_ioctl.o partition-generic.o partitions/ obj-$(CONFIG_BLK_DEV_BSG) += bsg.o obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o diff --git a/block/blk-core.c b/block/blk-core.c index 576b7da..6162df6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -47,7 +48,7 @@ DEFINE_IDA(blk_queue_ida); /* * For the allocated request tables */ -static struct kmem_cache *request_cachep; +struct kmem_cache *request_cachep = NULL; /* * For queue allocation @@ -59,42 +60,6 @@ struct kmem_cache *blk_requestq_cachep; */ static struct workqueue_struct *kblockd_workqueue; -static void drive_stat_acct(struct request *rq, int new_io) -{ - struct hd_struct *part; - int rw = rq_data_dir(rq); - int cpu; - - if (!blk_do_io_stat(rq)) - return; - - cpu = part_stat_lock(); - - if (!new_io) { - part = rq->part; - part_stat_inc(cpu, part, merges[rw]); - } else { - part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); - if (!hd_struct_try_get(part)) { - /* - * The partition is already being removed, - * the request will be accounted on the disk only - * - * We take a reference on disk->part0 although that - * partition will never be deleted, so we can treat - * it as any other partition. 
- */ - part = &rq->rq_disk->part0; - hd_struct_get(part); - } - part_round_stats(cpu, part); - part_inc_in_flight(part, rw); - rq->part = part; - } - - part_stat_unlock(); -} - void blk_queue_congestion_threshold(struct request_queue *q) { int nr; @@ -1171,7 +1136,12 @@ EXPORT_SYMBOL(blk_get_request); struct request *blk_make_request(struct request_queue *q, struct bio *bio, gfp_t gfp_mask) { - struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); + struct request *rq; + + if (q->mq_ops) + rq = blk_mq_alloc_request(q, bio_data_dir(bio), gfp_mask); + else + rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); if (unlikely(!rq)) return ERR_PTR(-ENOMEM); @@ -1220,7 +1190,7 @@ EXPORT_SYMBOL(blk_requeue_request); static void add_acct_request(struct request_queue *q, struct request *rq, int where) { - drive_stat_acct(rq, 1); + blk_account_io_start(rq, true); __elv_add_request(q, rq, where); } @@ -1340,8 +1310,8 @@ void blk_add_request_payload(struct request *rq, struct page *page, } EXPORT_SYMBOL_GPL(blk_add_request_payload); -static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, - struct bio *bio) +bool bio_attempt_back_merge(struct request_queue *q, struct request *req, + struct bio *bio) { const int ff = bio->bi_rw & REQ_FAILFAST_MASK; @@ -1358,12 +1328,12 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, req->__data_len += bio->bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); - drive_stat_acct(req, 0); + blk_account_io_start(req, false); return true; } -static bool bio_attempt_front_merge(struct request_queue *q, - struct request *req, struct bio *bio) +bool bio_attempt_front_merge(struct request_queue *q, struct request *req, + struct bio *bio) { const int ff = bio->bi_rw & REQ_FAILFAST_MASK; @@ -1388,12 +1358,12 @@ static bool bio_attempt_front_merge(struct request_queue *q, req->__data_len += bio->bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); - drive_stat_acct(req, 0); + blk_account_io_start(req, false); return true; } /** - * attempt_plug_merge - try to merge with %current's plugged list + * blk_attempt_plug_merge - try to merge with %current's plugged list * @q: request_queue new bio is being queued at * @bio: new bio being queued * @request_count: out parameter for number of traversed plugged requests @@ -1409,8 +1379,8 @@ static bool bio_attempt_front_merge(struct request_queue *q, * reliable access to the elevator outside queue lock. Only check basic * merging parameters without querying the elevator. */ -static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, - unsigned int *request_count) +bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, + unsigned int *request_count) { struct blk_plug *plug; struct request *rq; @@ -1489,7 +1459,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio) * Check if we can merge with the plugged list before grabbing * any locks. 
*/ - if (attempt_plug_merge(q, bio, &request_count)) + if (blk_attempt_plug_merge(q, bio, &request_count)) return; spin_lock_irq(q->queue_lock); @@ -1559,7 +1529,7 @@ get_rq: } } list_add_tail(&req->queuelist, &plug->list); - drive_stat_acct(req, 1); + blk_account_io_start(req, true); } else { spin_lock_irq(q->queue_lock); add_acct_request(q, req, where); @@ -2013,7 +1983,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq) } EXPORT_SYMBOL_GPL(blk_rq_err_bytes); -static void blk_account_io_completion(struct request *req, unsigned int bytes) +void blk_account_io_completion(struct request *req, unsigned int bytes) { if (blk_do_io_stat(req)) { const int rw = rq_data_dir(req); @@ -2027,7 +1997,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes) } } -static void blk_account_io_done(struct request *req) +void blk_account_io_done(struct request *req) { /* * Account IO completion. flush_rq isn't accounted as a @@ -2053,6 +2023,42 @@ static void blk_account_io_done(struct request *req) } } +void blk_account_io_start(struct request *rq, bool new_io) +{ + struct hd_struct *part; + int rw = rq_data_dir(rq); + int cpu; + + if (!blk_do_io_stat(rq)) + return; + + cpu = part_stat_lock(); + + if (!new_io) { + part = rq->part; + part_stat_inc(cpu, part, merges[rw]); + } else { + part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); + if (!hd_struct_try_get(part)) { + /* + * The partition is already being removed, + * the request will be accounted on the disk only + * + * We take a reference on disk->part0 although that + * partition will never be deleted, so we can treat + * it as any other partition. + */ + part = &rq->rq_disk->part0; + hd_struct_get(part); + } + part_round_stats(cpu, part); + part_inc_in_flight(part, rw); + rq->part = part; + } + + part_stat_unlock(); +} + /** * blk_peek_request - peek at the top of a request queue * @q: request queue to peek at @@ -2460,7 +2466,6 @@ static void blk_finish_request(struct request *req, int error) if (req->cmd_flags & REQ_DONTPREP) blk_unprep_request(req); - blk_account_io_done(req); if (req->end_io) @@ -2968,12 +2973,23 @@ struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, } EXPORT_SYMBOL(blk_check_plugged); +static void do_queue_unplug(struct request_queue *q, bool from_schedule, + unsigned int depth, struct list_head *list) +{ + if (q->mq_ops) { + trace_block_unplug(q, depth, !from_schedule); + blk_mq_insert_requests(q, list, 1, from_schedule); + } else + queue_unplugged(q, depth, from_schedule); +} + void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request_queue *q; unsigned long flags; struct request *rq; LIST_HEAD(list); + LIST_HEAD(q_list); unsigned int depth; BUG_ON(plug->magic != PLUG_MAGIC); @@ -3003,10 +3019,17 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) * This drops the queue lock */ if (q) - queue_unplugged(q, depth, from_schedule); + do_queue_unplug(q, from_schedule, depth, &q_list); q = rq->q; depth = 0; - spin_lock(q->queue_lock); + if (!q->mq_ops) + spin_lock(q->queue_lock); + } + + if (q->mq_ops) { + depth++; + list_add_tail(&rq->queuelist, &q_list); + continue; } /* @@ -3032,7 +3055,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) * This drops the queue lock */ if (q) - queue_unplugged(q, depth, from_schedule); + do_queue_unplug(q, from_schedule, depth, &q_list); local_irq_restore(flags); } diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c new file mode 100644 index 0000000..f8ea39d --- 
/dev/null +++ b/block/blk-mq-cpu.c @@ -0,0 +1,93 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "blk-mq.h" + +static LIST_HEAD(blk_mq_cpu_notify_list); +static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); + +static int __cpuinit blk_mq_main_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long) hcpu; + struct blk_mq_cpu_notifier *notify; + + spin_lock(&blk_mq_cpu_notify_lock); + + list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) + notify->notify(notify->data, action, cpu); + + spin_unlock(&blk_mq_cpu_notify_lock); + return NOTIFY_OK; +} + +static void __cpuinit blk_mq_cpu_notify(void *data, unsigned long action, + unsigned int cpu) +{ + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { + /* + * If the CPU goes away, ensure that we run any pending + * completions. + */ + struct llist_node *node; + struct request *rq; + + local_irq_disable(); + + node = llist_del_all(&per_cpu(ipi_lists, cpu)); + while (node) { + struct llist_node *next = node->next; + + rq = llist_entry(node, struct request, ll_list); + __blk_mq_end_io(rq, rq->errors); + node = next; + } + + local_irq_enable(); + } +} + +static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = { + .notifier_call = blk_mq_main_cpu_notify, +}; + +void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) +{ + BUG_ON(!notifier->notify); + + spin_lock(&blk_mq_cpu_notify_lock); + list_add_tail(¬ifier->list, &blk_mq_cpu_notify_list); + spin_unlock(&blk_mq_cpu_notify_lock); +} + +void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) +{ + spin_lock(&blk_mq_cpu_notify_lock); + list_del(¬ifier->list); + spin_unlock(&blk_mq_cpu_notify_lock); +} + +void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, + void (*fn)(void *, unsigned long, unsigned int), + void *data) +{ + notifier->notify = fn; + notifier->data = data; +} + +static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = { + .notify = blk_mq_cpu_notify, +}; + +void __init blk_mq_cpu_init(void) +{ + register_hotcpu_notifier(&blk_mq_main_cpu_notifier); + blk_mq_register_cpu_notifier(&cpu_notifier); +} diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c new file mode 100644 index 0000000..c19e2fd --- /dev/null +++ b/block/blk-mq-cpumap.c @@ -0,0 +1,100 @@ +#include +#include +#include +#include +#include +#include + +#include +#include "blk.h" +#include "blk-mq.h" + +static void show_map(unsigned int *map, unsigned int nr) +{ + int i; + + pr_info("blk-mq: CPU -> queue map\n"); + for (i = 0; i < nr; i++) + pr_info(" CPU%2u -> Queue %u\n", i, map[i]); +} + +static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues, + const int cpu) +{ + return cpu / ((nr_cpus + nr_queues - 1) / nr_queues); +} + +static int get_first_sibling(unsigned int cpu) +{ + unsigned int ret; + + ret = cpumask_first(topology_thread_cpumask(cpu)); + if (ret < nr_cpu_ids) + return ret; + + return cpu; +} + +int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) +{ + unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; + cpumask_var_t cpus; + + if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) + return 1; + + cpumask_clear(cpus); + nr_cpus = nr_uniq_cpus = 0; + for_each_present_cpu(i) { + nr_cpus++; + first_sibling = get_first_sibling(i); + if (!cpumask_test_cpu(first_sibling, cpus)) + nr_uniq_cpus++; + cpumask_set_cpu(i, cpus); + } + + for (queue = 0, i = 0; i < nr_cpus; i++) { + /* + * Easy case - 
we have equal or more hardware queues. Or + * there are no thread siblings to take into account. Do + * 1:1 if enough, or sequential mapping if less. + */ + if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) { + map[i] = cpu_to_queue_index(nr_cpus, nr_queues, i); + continue; + } + + /* + * Less then nr_cpus queues, and we have some number of + * threads per cores. Map sibling threads to the same + * queue. + */ + first_sibling = get_first_sibling(i); + if (first_sibling == i) { + map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues, + queue); + queue++; + } else + map[i] = map[first_sibling]; + } + + show_map(map, nr_cpus); + free_cpumask_var(cpus); + return 0; +} + +unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg) +{ + unsigned int *map; + + map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL, + reg->numa_node); + if (!map) + return NULL; + + if (!blk_mq_update_queue_map(map, reg->nr_hw_queues)) + return map; + + kfree(map); + return NULL; +} diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c new file mode 100644 index 0000000..165dcf0 --- /dev/null +++ b/block/blk-mq-sysfs.c @@ -0,0 +1,384 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "blk-mq.h" +#include "blk-mq-tag.h" + +static void blk_mq_sysfs_release(struct kobject *kobj) +{ +} + +struct blk_mq_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_ctx *, char *); + ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t); +}; + +struct blk_mq_hw_ctx_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_mq_hw_ctx *, char *); + ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t); +}; + +static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr, + char *page) +{ + struct blk_mq_ctx_sysfs_entry *entry; + struct blk_mq_ctx *ctx; + struct request_queue *q; + ssize_t res; + + entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); + ctx = container_of(kobj, struct blk_mq_ctx, kobj); + q = ctx->queue; + + if (!entry->show) + return -EIO; + + res = -ENOENT; + mutex_lock(&q->sysfs_lock); + if (!blk_queue_dying(q)) + res = entry->show(ctx, page); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct blk_mq_ctx_sysfs_entry *entry; + struct blk_mq_ctx *ctx; + struct request_queue *q; + ssize_t res; + + entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); + ctx = container_of(kobj, struct blk_mq_ctx, kobj); + q = ctx->queue; + + if (!entry->store) + return -EIO; + + res = -ENOENT; + mutex_lock(&q->sysfs_lock); + if (!blk_queue_dying(q)) + res = entry->store(ctx, page, length); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *page) +{ + struct blk_mq_hw_ctx_sysfs_entry *entry; + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + ssize_t res; + + entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); + hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); + q = hctx->queue; + + if (!entry->show) + return -EIO; + + res = -ENOENT; + mutex_lock(&q->sysfs_lock); + if (!blk_queue_dying(q)) + res = entry->show(hctx, page); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj, + struct attribute *attr, const char *page, + size_t length) +{ + struct 
blk_mq_hw_ctx_sysfs_entry *entry; + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + ssize_t res; + + entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); + hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); + q = hctx->queue; + + if (!entry->store) + return -EIO; + + res = -ENOENT; + mutex_lock(&q->sysfs_lock); + if (!blk_queue_dying(q)) + res = entry->store(hctx, page, length); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page) +{ + return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1], + ctx->rq_dispatched[0]); +} + +static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page) +{ + return sprintf(page, "%lu\n", ctx->rq_merged); +} + +static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page) +{ + return sprintf(page, "%lu %lu\n", ctx->rq_completed[1], + ctx->rq_completed[0]); +} + +static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg) +{ + char *start_page = page; + struct request *rq; + + page += sprintf(page, "%s:\n", msg); + + list_for_each_entry(rq, list, queuelist) + page += sprintf(page, "\t%p\n", rq); + + return page - start_page; +} + +static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page) +{ + ssize_t ret; + + spin_lock(&ctx->lock); + ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending"); + spin_unlock(&ctx->lock); + + return ret; +} + +static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx, + char *page) +{ + return sprintf(page, "%lu\n", hctx->queued); +} + +static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + return sprintf(page, "%lu\n", hctx->run); +} + +static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx, + char *page) +{ + char *start_page = page; + int i; + + page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]); + + for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) { + unsigned long d = 1U << (i - 1); + + page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]); + } + + return page - start_page; +} + +static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx, + char *page) +{ + ssize_t ret; + + spin_lock(&hctx->lock); + ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending"); + spin_unlock(&hctx->lock); + + return ret; +} + +static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + ssize_t ret; + + spin_lock(&hctx->lock); + ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI)); + spin_unlock(&hctx->lock); + + return ret; +} + +static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx, + const char *page, size_t len) +{ + struct blk_mq_ctx *ctx; + unsigned long ret; + unsigned int i; + + if (kstrtoul(page, 10, &ret)) { + pr_err("blk-mq-sysfs: invalid input '%s'\n", page); + return -EINVAL; + } + + spin_lock(&hctx->lock); + if (ret) + hctx->flags |= BLK_MQ_F_SHOULD_IPI; + else + hctx->flags &= ~BLK_MQ_F_SHOULD_IPI; + spin_unlock(&hctx->lock); + + hctx_for_each_ctx(hctx, ctx, i) + ctx->ipi_redirect = !!ret; + + return len; +} + +static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + return blk_mq_tag_sysfs_show(hctx->tags, page); +} + +static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = { + .attr = {.name = "dispatched", .mode = S_IRUGO }, + .show = blk_mq_sysfs_dispatched_show, +}; +static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = { + .attr = {.name = "merged", .mode = S_IRUGO }, + 
.show = blk_mq_sysfs_merged_show, +}; +static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = { + .attr = {.name = "completed", .mode = S_IRUGO }, + .show = blk_mq_sysfs_completed_show, +}; +static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = { + .attr = {.name = "rq_list", .mode = S_IRUGO }, + .show = blk_mq_sysfs_rq_list_show, +}; + +static struct attribute *default_ctx_attrs[] = { + &blk_mq_sysfs_dispatched.attr, + &blk_mq_sysfs_merged.attr, + &blk_mq_sysfs_completed.attr, + &blk_mq_sysfs_rq_list.attr, + NULL, +}; + +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = { + .attr = {.name = "queued", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_queued_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = { + .attr = {.name = "run", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_run_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = { + .attr = {.name = "dispatched", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_dispatched_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { + .attr = {.name = "pending", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_rq_list_show, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = { + .attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUGO}, + .show = blk_mq_hw_sysfs_ipi_show, + .store = blk_mq_hw_sysfs_ipi_store, +}; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { + .attr = {.name = "tags", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_tags_show, +}; + +static struct attribute *default_hw_ctx_attrs[] = { + &blk_mq_hw_sysfs_queued.attr, + &blk_mq_hw_sysfs_run.attr, + &blk_mq_hw_sysfs_dispatched.attr, + &blk_mq_hw_sysfs_pending.attr, + &blk_mq_hw_sysfs_ipi.attr, + &blk_mq_hw_sysfs_tags.attr, + NULL, +}; + +static const struct sysfs_ops blk_mq_sysfs_ops = { + .show = blk_mq_sysfs_show, + .store = blk_mq_sysfs_store, +}; + +static const struct sysfs_ops blk_mq_hw_sysfs_ops = { + .show = blk_mq_hw_sysfs_show, + .store = blk_mq_hw_sysfs_store, +}; + +static struct kobj_type blk_mq_ktype = { + .sysfs_ops = &blk_mq_sysfs_ops, + .release = blk_mq_sysfs_release, +}; + +static struct kobj_type blk_mq_ctx_ktype = { + .sysfs_ops = &blk_mq_sysfs_ops, + .default_attrs = default_ctx_attrs, + .release = blk_mq_sysfs_release, +}; + +static struct kobj_type blk_mq_hw_ktype = { + .sysfs_ops = &blk_mq_hw_sysfs_ops, + .default_attrs = default_hw_ctx_attrs, + .release = blk_mq_sysfs_release, +}; + +void blk_mq_unregister_disk(struct gendisk *disk) +{ + struct request_queue *q = disk->queue; + + kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); + kobject_del(&q->mq_kobj); + + kobject_put(&disk_to_dev(disk)->kobj); +} + +int blk_mq_register_disk(struct gendisk *disk) +{ + struct device *dev = disk_to_dev(disk); + struct request_queue *q = disk->queue; + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + int ret, i, j; + + kobject_init(&q->mq_kobj, &blk_mq_ktype); + + ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); + if (ret < 0) + return ret; + + kobject_uevent(&q->mq_kobj, KOBJ_ADD); + + queue_for_each_hw_ctx(q, hctx, i) { + kobject_init(&hctx->kobj, &blk_mq_hw_ktype); + ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i); + if (ret) + break; + + if (!hctx->nr_ctx) + continue; + + hctx_for_each_ctx(hctx, ctx, j) { + kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); + ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); + if (ret) + break; + } + } + + if (ret) { + blk_mq_unregister_disk(disk); + return ret; + } + + return 0; 
+} diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c new file mode 100644 index 0000000..dcbc2a4 --- /dev/null +++ b/block/blk-mq-tag.c @@ -0,0 +1,533 @@ +#include +#include +#include +#include +#include +#include + +#include +#include "blk.h" +#include "blk-mq.h" +#include "blk-mq-tag.h" + +/* + * Per-cpu cache entries + */ +struct blk_mq_tag_map { + unsigned int nr_free; + unsigned int freelist[]; +}; + +/* + * Per tagged queue (tag address space) map + */ +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int reserved_tags; + unsigned int batch_move; + unsigned int max_cache; + + struct { + spinlock_t lock; + unsigned int nr_free; + unsigned int *freelist; + unsigned int nr_reserved; + unsigned int *reservelist; + struct list_head wait; + } ____cacheline_aligned_in_smp; + + struct blk_mq_tag_map __percpu *free_maps; + + struct blk_mq_cpu_notifier cpu_notifier; +}; + +struct blk_mq_tag_wait { + struct list_head list; + struct task_struct *task; +}; + +#define DEFINE_TAG_WAIT(name) \ + struct blk_mq_tag_wait name = { \ + .list = LIST_HEAD_INIT((name).list), \ + .task = current, \ + } + +static unsigned int move_tags(unsigned int *dst, unsigned int *dst_nr, + unsigned int *src, unsigned int *src_nr, + unsigned int nr_to_move) +{ + nr_to_move = min(nr_to_move, *src_nr); + *src_nr -= nr_to_move; + memcpy(dst + *dst_nr, src + *src_nr, sizeof(int) * nr_to_move); + *dst_nr += nr_to_move; + + return nr_to_move; +} + +static void __wake_waiters(struct blk_mq_tags *tags, unsigned int nr) +{ + while (nr && !list_empty(&tags->wait)) { + struct blk_mq_tag_wait *waiter; + + waiter = list_entry(tags->wait.next, struct blk_mq_tag_wait, + list); + list_del_init(&waiter->list); + wake_up_process(waiter->task); + nr--; + } +} + +static void __blk_mq_tag_return(struct blk_mq_tags *tags, + struct blk_mq_tag_map *map, unsigned int nr) +{ + unsigned int waiters; + + lockdep_assert_held(&tags->lock); + + waiters = move_tags(tags->freelist, &tags->nr_free, map->freelist, + &map->nr_free, nr); + if (!list_empty(&tags->wait)) + __wake_waiters(tags, waiters); +} + +static void blk_mq_tag_return(struct blk_mq_tags *tags, + struct blk_mq_tag_map *map, unsigned int nr) +{ + unsigned long flags; + + spin_lock_irqsave(&tags->lock, flags); + __blk_mq_tag_return(tags, map, nr); + spin_unlock_irqrestore(&tags->lock, flags); +} + +#if NR_CPUS != 1 +static void prune_cache(void *data) +{ + struct blk_mq_tags *tags = data; + struct blk_mq_tag_map *map; + + map = per_cpu_ptr(tags->free_maps, smp_processor_id()); + + spin_lock(&tags->lock); + __blk_mq_tag_return(tags, map, tags->batch_move); + spin_unlock(&tags->lock); +} +#endif + +static void ipi_local_caches(struct blk_mq_tags *tags, unsigned int this_cpu) +{ +#if NR_CPUS != 1 + cpumask_var_t ipi_mask; + unsigned int i, total; + + /* + * We could per-cpu cache this things, but overhead is probably not + * large enough to care about it. If we fail, just punt to doing a + * prune on all CPUs. 
+ */ + if (!alloc_cpumask_var(&ipi_mask, GFP_ATOMIC)) { + smp_call_function(prune_cache, tags, 0); + return; + } + + cpumask_clear(ipi_mask); + + total = 0; + for_each_online_cpu(i) { + struct blk_mq_tag_map *map = per_cpu_ptr(tags->free_maps, i); + + if (!map->nr_free) + continue; + + total += map->nr_free; + cpumask_set_cpu(i, ipi_mask); + + if (total > tags->batch_move) + break; + } + + if (total) { + preempt_disable(); + smp_call_function_many(ipi_mask, prune_cache, tags, 0); + preempt_enable(); + } + + free_cpumask_var(ipi_mask); +#endif +} + +/* + * Wait on a free tag, move batch to map when we have it. Returns with + * local CPU irq flags saved in 'flags'. + */ +static void wait_on_tags(struct blk_mq_tags *tags, struct blk_mq_tag_map **map, + unsigned long *flags) +{ + DEFINE_TAG_WAIT(wait); + + do { + spin_lock_irqsave(&tags->lock, *flags); + + __set_current_state(TASK_UNINTERRUPTIBLE); + + if (list_empty(&wait.list)) + list_add_tail(&wait.list, &tags->wait); + + *map = this_cpu_ptr(tags->free_maps); + if ((*map)->nr_free || tags->nr_free) { + if (!(*map)->nr_free) { + move_tags((*map)->freelist, &(*map)->nr_free, + tags->freelist, &tags->nr_free, + tags->batch_move); + } + + if (!list_empty(&wait.list)) + list_del(&wait.list); + + spin_unlock(&tags->lock); + break; + } + + spin_unlock_irqrestore(&tags->lock, *flags); + ipi_local_caches(tags, raw_smp_processor_id()); + io_schedule(); + } while (1); + + __set_current_state(TASK_RUNNING); +} + +void blk_mq_wait_for_tags(struct blk_mq_tags *tags) +{ + struct blk_mq_tag_map *map; + unsigned long flags; + + ipi_local_caches(tags, raw_smp_processor_id()); + wait_on_tags(tags, &map, &flags); + local_irq_restore(flags); +} + +bool blk_mq_has_free_tags(struct blk_mq_tags *tags) +{ + return !tags || tags->nr_free != 0; +} + +static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp) +{ + struct blk_mq_tag_map *map; + unsigned int this_cpu; + unsigned long flags; + unsigned int tag; + + local_irq_save(flags); + this_cpu = smp_processor_id(); + map = per_cpu_ptr(tags->free_maps, this_cpu); + + /* + * Grab from local per-cpu cache, if we can + */ + do { + if (map->nr_free) { + map->nr_free--; + tag = map->freelist[map->nr_free]; + local_irq_restore(flags); + return tag; + } + + /* + * Grab from device map, if we can + */ + if (tags->nr_free) { + spin_lock(&tags->lock); + move_tags(map->freelist, &map->nr_free, tags->freelist, + &tags->nr_free, tags->batch_move); + spin_unlock(&tags->lock); + continue; + } + + local_irq_restore(flags); + + if (!(gfp & __GFP_WAIT)) + break; + + ipi_local_caches(tags, this_cpu); + + /* + * All are busy, wait. Returns with irqs disabled again + * and potentially new 'map' pointer. 
+ */ + wait_on_tags(tags, &map, &flags); + } while (1); + + return BLK_MQ_TAG_FAIL; +} + +static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, + gfp_t gfp) +{ + unsigned int tag = BLK_MQ_TAG_FAIL; + DEFINE_TAG_WAIT(wait); + + if (unlikely(!tags->reserved_tags)) { + WARN_ON_ONCE(1); + return BLK_MQ_TAG_FAIL; + } + + do { + spin_lock_irq(&tags->lock); + if (tags->nr_reserved) { + tags->nr_reserved--; + tag = tags->reservelist[tags->nr_reserved]; + break; + } + + if (!(gfp & __GFP_WAIT)) + break; + + __set_current_state(TASK_UNINTERRUPTIBLE); + + if (list_empty(&wait.list)) + list_add_tail(&wait.list, &tags->wait); + + spin_unlock_irq(&tags->lock); + io_schedule(); + } while (1); + + if (!list_empty(&wait.list)) + list_del(&wait.list); + + spin_unlock_irq(&tags->lock); + return tag; +} + +unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved) +{ + if (!reserved) + return __blk_mq_get_tag(tags, gfp); + + return __blk_mq_get_reserved_tag(tags, gfp); +} + +static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) +{ + struct blk_mq_tag_map *map; + unsigned long flags; + + BUG_ON(tag >= tags->nr_tags); + + local_irq_save(flags); + map = this_cpu_ptr(tags->free_maps); + + map->freelist[map->nr_free] = tag; + map->nr_free++; + + if (map->nr_free >= tags->max_cache || + !list_empty_careful(&tags->wait)) { + spin_lock(&tags->lock); + __blk_mq_tag_return(tags, map, tags->batch_move); + spin_unlock(&tags->lock); + } + + local_irq_restore(flags); +} + +static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, + unsigned int tag) +{ + unsigned long flags; + + spin_lock_irqsave(&tags->lock, flags); + tags->reservelist[tags->nr_reserved] = tag; + tags->nr_reserved++; + + if (!list_empty(&tags->wait)) + __wake_waiters(tags, 1); + + spin_unlock_irqrestore(&tags->lock, flags); +} + +void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) +{ + if (tag >= tags->reserved_tags) + __blk_mq_put_tag(tags, tag); + else + __blk_mq_put_reserved_tag(tags, tag); +} + +void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, + void (*fn)(void *, unsigned long *), void *data) +{ + unsigned long flags, *tag_map; + unsigned int i, j; + size_t map_size; + + map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG; + tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC); + if (!tag_map) + return; + + local_irq_save(flags); + + for_each_online_cpu(i) { + struct blk_mq_tag_map *map = per_cpu_ptr(tags->free_maps, i); + + for (j = 0; j < map->nr_free; j++) + __set_bit(map->freelist[j], tag_map); + } + + if (tags->nr_free || tags->nr_reserved) { + spin_lock(&tags->lock); + + if (tags->nr_reserved) + for (j = 0; j < tags->nr_reserved; j++) + __set_bit(tags->reservelist[j], tag_map); + + if (tags->nr_free) + for (j = 0; j < tags->nr_free; j++) + __set_bit(tags->freelist[j], tag_map); + + spin_unlock(&tags->lock); + } + + local_irq_restore(flags); + + fn(data, tag_map); + kfree(tag_map); +} + +static void blk_mq_tag_notify(void *data, unsigned long action, + unsigned int cpu) +{ + /* + * Move entries from this CPU to global pool + */ + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { + struct blk_mq_tags *tags = data; + struct blk_mq_tag_map *map = per_cpu_ptr(tags->free_maps, cpu); + + if (map->nr_free) + blk_mq_tag_return(tags, map, map->nr_free); + } +} + +struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, + unsigned int reserved_tags, int node) +{ + struct blk_mq_tags *tags; + size_t map_size; + + if (nr_tags > BLK_MQ_TAG_MAX) { + 
pr_err("blk-mq: tag depth too large\n"); + return NULL; + } + + tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node); + if (!tags) + return NULL; + + map_size = sizeof(struct blk_mq_tag_map) + nr_tags * sizeof(int); + tags->free_maps = __alloc_percpu(map_size, sizeof(void *)); + if (!tags->free_maps) + goto err_free_maps; + + tags->freelist = kmalloc_node(sizeof(int) * nr_tags, GFP_KERNEL, node); + if (!tags->freelist) + goto err_freelist; + + if (reserved_tags) { + tags->reservelist = kmalloc_node(sizeof(int) * reserved_tags, + GFP_KERNEL, node); + if (!tags->reservelist) + goto err_reservelist; + } + + spin_lock_init(&tags->lock); + INIT_LIST_HEAD(&tags->wait); + tags->nr_tags = nr_tags; + tags->reserved_tags = reserved_tags; + tags->max_cache = nr_tags / num_possible_cpus(); + if (tags->max_cache < 4) + tags->max_cache = 4; + else if (tags->max_cache > 64) + tags->max_cache = 64; + + tags->batch_move = tags->max_cache / 2; + + /* + * Reserved tags are first + */ + if (reserved_tags) { + tags->nr_reserved = 0; + while (reserved_tags--) { + tags->reservelist[tags->nr_reserved] = + tags->nr_reserved; + tags->nr_reserved++; + } + } + + /* + * Rest of the tags start at the queue list + */ + tags->nr_free = 0; + while (nr_tags - tags->nr_reserved) { + tags->freelist[tags->nr_free] = tags->nr_free + + tags->nr_reserved; + nr_tags--; + tags->nr_free++; + } + + blk_mq_init_cpu_notifier(&tags->cpu_notifier, blk_mq_tag_notify, tags); + blk_mq_register_cpu_notifier(&tags->cpu_notifier); + return tags; + +err_reservelist: + kfree(tags->freelist); +err_freelist: + free_percpu(tags->free_maps); +err_free_maps: + kfree(tags); + return NULL; +} + +void blk_mq_free_tags(struct blk_mq_tags *tags) +{ + blk_mq_unregister_cpu_notifier(&tags->cpu_notifier); + free_percpu(tags->free_maps); + kfree(tags->freelist); + kfree(tags->reservelist); + kfree(tags); +} + +ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) +{ + char *orig_page = page; + unsigned long flags; + struct list_head *tmp; + int waiters; + int cpu; + + if (!tags) + return 0; + + spin_lock_irqsave(&tags->lock, flags); + + page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u," + " max_cache=%u\n", tags->nr_tags, tags->reserved_tags, + tags->batch_move, tags->max_cache); + + waiters = 0; + list_for_each(tmp, &tags->wait) + waiters++; + + page += sprintf(page, "nr_free=%u, nr_reserved=%u, waiters=%u\n", + tags->nr_free, tags->nr_reserved, waiters); + + for_each_online_cpu(cpu) { + struct blk_mq_tag_map *map = per_cpu_ptr(tags->free_maps, cpu); + + page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu, + map->nr_free); + } + + spin_unlock_irqrestore(&tags->lock, flags); + return page - orig_page; +} diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h new file mode 100644 index 0000000..ce4d5b2 --- /dev/null +++ b/block/blk-mq-tag.h @@ -0,0 +1,21 @@ +#ifndef INT_BLK_MQ_TAG_H +#define INT_BLK_MQ_TAG_H + +struct blk_mq_tags; + +extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); +extern void blk_mq_free_tags(struct blk_mq_tags *tags); + +extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved); +extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags); +extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag); +extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); +extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); +extern ssize_t blk_mq_tag_sysfs_show(struct 
blk_mq_tags *tags, char *page); + +enum { + BLK_MQ_TAG_FAIL = -1U, + BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1, +}; + +#endif diff --git a/block/blk-mq.c b/block/blk-mq.c new file mode 100644 index 0000000..06f7f3b --- /dev/null +++ b/block/blk-mq.c @@ -0,0 +1,1254 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include "blk.h" +#include "blk-mq.h" +#include "blk-mq-tag.h" + +static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx); + +DEFINE_PER_CPU(struct llist_head, ipi_lists); + +static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, + unsigned int cpu) +{ + return per_cpu_ptr(q->queue_ctx, cpu); +} + +/* + * This assumes per-cpu software queueing queues. They could be per-node + * as well, for instance. For now this is hardcoded as-is. Note that we don't + * care about preemption, since we know the ctx's are persistent. This does + * mean that we can't rely on ctx always matching the currently running CPU. + */ +static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) +{ + return __blk_mq_get_ctx(q, get_cpu()); +} + +static void blk_mq_put_ctx(struct blk_mq_ctx *ctx) +{ + put_cpu(); +} + +/* + * Check if any of the ctx's have pending work in this hardware queue + */ +static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) +{ + unsigned int i; + + for (i = 0; i < hctx->nr_ctx_map; i++) + if (hctx->ctx_map[i]) + return true; + + return false; +} + +/* + * Mark this ctx as having pending work in this hardware queue + */ +static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) +{ + if (!test_bit(ctx->index_hw, hctx->ctx_map)) + set_bit(ctx->index_hw, hctx->ctx_map); +} + +static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp, + bool reserved) +{ + struct request *rq; + unsigned int tag; + + tag = blk_mq_get_tag(hctx->tags, gfp, reserved); + if (tag != BLK_MQ_TAG_FAIL) { + rq = hctx->rqs[tag]; + rq->tag = tag; + + return rq; + } + + return NULL; +} + +bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) +{ + return blk_mq_has_free_tags(hctx->tags); +} +EXPORT_SYMBOL(blk_mq_can_queue); + +static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq, + unsigned int rw_flags) +{ + rq->mq_ctx = ctx; + rq->cmd_flags = rw_flags; + ctx->rq_dispatched[rw_is_sync(rw_flags)]++; +} + +static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, + gfp_t gfp, bool reserved) +{ + return blk_mq_alloc_rq(hctx, gfp, reserved); +} + +static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, + int rw, gfp_t gfp, + bool reserved) +{ + struct request *rq; + + do { + struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); + struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); + + rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved); + if (rq) { + blk_mq_rq_ctx_init(ctx, rq, rw); + break; + } else if (!(gfp & __GFP_WAIT)) + break; + + blk_mq_put_ctx(ctx); + __blk_mq_run_hw_queue(hctx); + blk_mq_wait_for_tags(hctx->tags); + } while (1); + + return rq; +} + +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) +{ + struct request *rq; + + rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); + blk_mq_put_ctx(rq->mq_ctx); + return rq; +} + +struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, + gfp_t gfp) +{ + struct request *rq; + + rq = blk_mq_alloc_request_pinned(q, rw, gfp, true); + 
blk_mq_put_ctx(rq->mq_ctx); + return rq; +} +EXPORT_SYMBOL(blk_mq_alloc_reserved_request); + +/* + * Re-init and set pdu, if we have it + */ +static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) +{ + blk_rq_init(hctx->queue, rq); + + if (hctx->rq_pdu) + rq->special = blk_mq_rq_to_pdu(rq); +} + +static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, struct request *rq) +{ + const int tag = rq->tag; + + blk_mq_rq_init(hctx, rq); + blk_mq_put_tag(hctx->tags, tag); +} + +void blk_mq_free_request(struct request *rq) +{ + struct blk_mq_ctx *ctx = rq->mq_ctx; + struct blk_mq_hw_ctx *hctx; + struct request_queue *q = rq->q; + + ctx->rq_completed[rq_is_sync(rq)]++; + + hctx = q->mq_ops->map_queue(q, ctx->cpu); + __blk_mq_free_request(hctx, ctx, rq); +} +EXPORT_SYMBOL(blk_mq_free_request); + +static void blk_mq_finish_request(struct request *rq, int error) +{ + struct bio *bio = rq->bio; + unsigned int bytes = 0; + + while (bio) { + struct bio *next = bio->bi_next; + + bio->bi_next = NULL; + bytes += bio->bi_size; + bio_endio(bio, error); + bio = next; + } + + blk_account_io_completion(rq, bytes); + blk_account_io_done(rq); + blk_mq_free_request(rq); +} + +void __blk_mq_end_io(struct request *rq, int error) +{ + if (blk_mark_rq_complete(rq)) + return; + + trace_block_rq_complete(rq->q, rq); + + /* + * If ->end_io is set, it's responsible for doing the rest of the + * completion. + */ + if (rq->end_io) + rq->end_io(rq, error); + else + blk_mq_finish_request(rq, error); +} + +/* + * Called with interrupts disabled. + */ +static void ipi_end_io(void *data) +{ + struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id()); + struct llist_node *entry, *next; + struct request *rq; + + entry = llist_del_all(list); + + while (entry) { + next = entry->next; + rq = llist_entry(entry, struct request, ll_list); + __blk_mq_end_io(rq, rq->errors); + entry = next; + } +} + +/* + * End IO on this request on a multiqueue enabled driver. We'll either do + * it directly inline, or punt to a local IPI handler on the matching + * remote CPU. + */ +void blk_mq_end_io(struct request *rq, int error) +{ + struct blk_mq_ctx *ctx = rq->mq_ctx; + int cpu; + + if (!ctx->ipi_redirect) + return __blk_mq_end_io(rq, error); + + cpu = get_cpu(); + if (cpu == ctx->cpu) + __blk_mq_end_io(rq, error); + else { + struct call_single_data *data = &rq->csd; + + rq->errors = error; + rq->ll_list.next = NULL; + + /* + * If the list is non-empty, an existing IPI must already + * be "in flight". If that is the case, we need not schedule + * a new one. + */ + if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) { + data->func = ipi_end_io; + data->flags = 0; + __smp_call_function_single(ctx->cpu, data, 0); + } + } + + put_cpu(); +} +EXPORT_SYMBOL(blk_mq_end_io); + +static void blk_mq_start_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + trace_block_rq_issue(q, rq); + + /* + * Just mark start time and set the started bit. Due to memory + * ordering, we know we'll see the correct deadline as long as + * REQ_ATOMIC_STARTED is seen. 
+ */ + rq->deadline = jiffies + q->rq_timeout; + set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); +} + +static void blk_mq_requeue_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + trace_block_rq_requeue(q, rq); + clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); +} + +struct blk_mq_timeout_data { + struct blk_mq_hw_ctx *hctx; + unsigned long *next; + unsigned int *next_set; +}; + +static void blk_mq_timeout_check(void *__data, unsigned long *free_tags) +{ + struct blk_mq_timeout_data *data = __data; + struct blk_mq_hw_ctx *hctx = data->hctx; + unsigned int tag; + + /* It may not be in flight yet (this is where + * the REQ_ATOMIC_STARTED flag comes in). The requests are + * statically allocated, so we know it's always safe to access the + * memory associated with a bit offset into ->rqs[]. + */ + tag = 0; + do { + struct request *rq; + + tag = find_next_zero_bit(free_tags, hctx->queue_depth, tag); + if (tag >= hctx->queue_depth) + break; + + rq = hctx->rqs[tag]; + + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + continue; + + blk_rq_check_expired(rq, data->next, data->next_set); + } while (1); +} + +static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx, + unsigned long *next, + unsigned int *next_set) +{ + struct blk_mq_timeout_data data = { + .hctx = hctx, + .next = next, + .next_set = next_set, + }; + + /* + * Ask the tagging code to iterate busy requests, so we can + * check them for timeout. + */ + blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data); +} + +static void blk_mq_rq_timer(unsigned long data) +{ + struct request_queue *q = (struct request_queue *) data; + struct blk_mq_hw_ctx *hctx; + unsigned long next = 0; + int i, next_set = 0; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set); + + if (next_set) + mod_timer(&q->timeout, round_jiffies_up(next)); +} + +/* + * Reverse check our software queue for entries that we could potentially + * merge with. Currently includes a hand-wavy stop count of 8, to not spend + * too much time checking for merges. + */ +static bool blk_mq_attempt_merge(struct request_queue *q, + struct blk_mq_ctx *ctx, struct bio *bio) +{ + struct request *rq; + int checked = 8; + + list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) { + int el_ret; + + if (!checked--) + break; + + if (!blk_rq_merge_ok(rq, bio)) + continue; + + el_ret = blk_try_merge(rq, bio); + if (el_ret == ELEVATOR_BACK_MERGE) { + if (bio_attempt_back_merge(q, rq, bio)) { + ctx->rq_merged++; + return true; + } + break; + } else if (el_ret == ELEVATOR_FRONT_MERGE) { + if (bio_attempt_front_merge(q, rq, bio)) { + ctx->rq_merged++; + return true; + } + break; + } + } + + return false; +} + +void blk_mq_add_timer(struct request *rq) +{ + __blk_add_timer(rq, NULL); +} + +/* + * Run this hardware queue, pulling any software queues mapped to it in. + * Note that this function currently has various problems around ordering + * of IO. In particular, we'd like FIFO behaviour on handling existing + * items on the hctx->dispatch list. Ignore that for now. + */ +static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct blk_mq_ctx *ctx; + struct request *rq; + LIST_HEAD(rq_list); + int bit, queued; + + if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags))) + return; + + hctx->run++; + + /* + * Touch any software queue that has pending entries. 
+ */ + for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) { + clear_bit(bit, hctx->ctx_map); + ctx = hctx->ctxs[bit]; + BUG_ON(bit != ctx->index_hw); + + spin_lock(&ctx->lock); + list_splice_tail_init(&ctx->rq_list, &rq_list); + spin_unlock(&ctx->lock); + } + + /* + * If we have previous entries on our dispatch list, grab them + * and stuff them at the front for more fair dispatch. + */ + if (!list_empty_careful(&hctx->dispatch)) { + spin_lock(&hctx->lock); + if (!list_empty(&hctx->dispatch)) + list_splice_init(&hctx->dispatch, &rq_list); + spin_unlock(&hctx->lock); + } + + /* + * Delete and return all entries from our dispatch list + */ + queued = 0; + + /* + * Now process all the entries, sending them to the driver. + */ + while (!list_empty(&rq_list)) { + int ret; + + rq = list_first_entry(&rq_list, struct request, queuelist); + list_del_init(&rq->queuelist); + blk_mq_start_request(rq); + + /* + * Last request in the series. Flag it as such, this + * enables drivers to know when IO should be kicked off, + * if they don't do it on a per-request basis. + * + * Note: the flag isn't the only condition drivers + * should do kick off. If drive is busy, the last + * request might not have the bit set. + */ + if (list_empty(&rq_list)) + rq->cmd_flags |= REQ_END; + + ret = q->mq_ops->queue_rq(hctx, rq); + switch (ret) { + case BLK_MQ_RQ_QUEUE_OK: + queued++; + continue; + case BLK_MQ_RQ_QUEUE_BUSY: + /* + * FIXME: we should have a mechanism to stop the queue + * like blk_stop_queue, otherwise we will waste cpu + * time + */ + list_add(&rq->queuelist, &rq_list); + blk_mq_requeue_request(rq); + break; + default: + pr_err("blk-mq: bad return on queue: %d\n", ret); + rq->errors = -EIO; + case BLK_MQ_RQ_QUEUE_ERROR: + blk_mq_end_io(rq, rq->errors); + break; + } + + if (ret == BLK_MQ_RQ_QUEUE_BUSY) + break; + } + + if (!queued) + hctx->dispatched[0]++; + else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1))) + hctx->dispatched[ilog2(queued) + 1]++; + + /* + * Any items that need requeuing? Stuff them into hctx->dispatch, + * that is where we will continue on next queue run. 
+ */ + if (!list_empty(&rq_list)) { + spin_lock(&hctx->lock); + list_splice(&rq_list, &hctx->dispatch); + spin_unlock(&hctx->lock); + } +} + +void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) +{ + if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags))) + return; + + if (!async) + __blk_mq_run_hw_queue(hctx); + else { + struct request_queue *q = hctx->queue; + + kblockd_schedule_delayed_work(q, &hctx->delayed_work, 0); + } +} + +void blk_mq_run_queues(struct request_queue *q, bool async) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) { + if ((!blk_mq_hctx_has_pending(hctx) && + list_empty_careful(&hctx->dispatch)) || + test_bit(BLK_MQ_S_STOPPED, &hctx->flags)) + continue; + + blk_mq_run_hw_queue(hctx, async); + } +} +EXPORT_SYMBOL(blk_mq_run_queues); + +void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) +{ + cancel_delayed_work(&hctx->delayed_work); + set_bit(BLK_MQ_S_STOPPED, &hctx->state); +} +EXPORT_SYMBOL(blk_mq_stop_hw_queue); + +void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) +{ + clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + __blk_mq_run_hw_queue(hctx); +} +EXPORT_SYMBOL(blk_mq_start_hw_queue); + +void blk_mq_start_stopped_hw_queues(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) { + if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state)) + continue; + + blk_mq_start_hw_queue(hctx); + } +} +EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); + +static void blk_mq_work_fn(struct work_struct *work) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work); + __blk_mq_run_hw_queue(hctx); +} + +static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, + struct request *rq) +{ + struct blk_mq_ctx *ctx = rq->mq_ctx; + + list_add_tail(&rq->queuelist, &ctx->rq_list); + blk_mq_hctx_mark_pending(hctx, ctx); + + /* + * We do this early, to ensure we are on the right CPU. + */ + blk_mq_add_timer(rq); +} + +void blk_mq_insert_request(struct request_queue *q, struct request *rq, + bool run_queue) +{ + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + + ctx = rq->mq_ctx; + hctx = q->mq_ops->map_queue(q, ctx->cpu); + + spin_lock(&ctx->lock); + __blk_mq_insert_request(hctx, rq); + spin_unlock(&ctx->lock); + + if (run_queue) + __blk_mq_run_hw_queue(hctx); +} +EXPORT_SYMBOL(blk_mq_insert_request); + +static void __blk_mq_insert_requests(struct request_queue *q, + struct blk_mq_ctx *ctx, + struct list_head *list, + bool run_queue, bool from_schedule) + +{ + struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); + + spin_lock(&ctx->lock); + while (!list_empty(list)) { + struct request *rq; + + rq = list_first_entry(list, struct request, queuelist); + list_del_init(&rq->queuelist); + __blk_mq_insert_request(hctx, rq); + } + spin_unlock(&ctx->lock); + + if (run_queue) + blk_mq_run_hw_queue(hctx, from_schedule); +} + +void blk_mq_insert_requests(struct request_queue *q, struct list_head *list, + bool run_queue, bool from_schedule) +{ + struct blk_mq_ctx *this_ctx; + LIST_HEAD(ctx_list); + + if (list_empty(list)) + return; + + /* + * Iterate list, placing requests on the right ctx. Do one ctx + * at the time. Given general CPU stickiness, the requests will + * typically end up being ordered anyway. + */ + this_ctx = NULL; + while (!list_empty(list)) { + struct request *rq, *tmp; + + /* + * If this_ctx is set and different from rq->mq_ctx, + * skip this 'rq'. This groups the same ctx's together, + * so we batch completions for those. 
+ */ + list_for_each_entry_safe(rq, tmp, list, queuelist) { + if (rq->mq_ctx != this_ctx) { + if (this_ctx) + continue; + + this_ctx = rq->mq_ctx; + } + list_move_tail(&rq->queuelist, &ctx_list); + } + + __blk_mq_insert_requests(q, this_ctx, &ctx_list, run_queue, + from_schedule); + this_ctx = NULL; + } + + /* + * If 'this_ctx' is set, we know we have entries to complete + * on 'ctx_list'. Do those. + */ + if (this_ctx) + __blk_mq_insert_requests(q, this_ctx, &ctx_list, run_queue, + from_schedule); +} + +static void blk_mq_bio_to_request(struct request_queue *q, + struct request *rq, struct bio *bio) +{ + unsigned int rw_flags; + + rw_flags = bio_data_dir(bio); + if (rw_is_sync(bio->bi_rw)) + rw_flags |= REQ_SYNC; + + init_request_from_bio(rq, bio); + blk_account_io_start(rq, 1); +} + +static void blk_mq_make_request(struct request_queue *q, struct bio *bio) +{ + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + int is_sync = rw_is_sync(bio->bi_rw); + int rw = bio_data_dir(bio); + struct request *rq; + unsigned int use_plug, request_count = 0; + + /* + * If we have multiple hardware queues, just go directly to + * one of those for sync IO. + */ + use_plug = (q->nr_hw_queues == 1) || !is_sync; + + blk_queue_bounce(q, &bio); + + if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) + return; + + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); + + trace_block_getrq(q, bio, rw); + rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); + if (likely(rq)) + blk_mq_rq_ctx_init(ctx, rq, rw); + else { + blk_mq_put_ctx(ctx); + trace_block_sleeprq(q, bio, rw); + rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC, + false); + ctx = rq->mq_ctx; + hctx = q->mq_ops->map_queue(q, ctx->cpu); + } + + hctx->queued++; + + /* + * A task plug currently exists. Since this is completely lockless, + * utilize that to temporarily store requests until the task is + * either done or scheduled away. + */ + if (use_plug) { + struct blk_plug *plug = current->plug; + + if (plug) { + blk_mq_bio_to_request(q, rq, bio); + if (list_empty(&plug->list)) + trace_block_plug(q); + else if (request_count >= BLK_MAX_REQUEST_COUNT) { + blk_flush_plug_list(plug, false); + trace_block_plug(q); + } + list_add_tail(&rq->queuelist, &plug->list); + blk_mq_put_ctx(ctx); + return; + } + } + + spin_lock(&ctx->lock); + + if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && + blk_mq_attempt_merge(q, ctx, bio)) + __blk_mq_free_request(hctx, ctx, rq); + else { + blk_mq_bio_to_request(q, rq, bio); + __blk_mq_insert_request(hctx, rq); + } + + spin_unlock(&ctx->lock); + blk_mq_put_ctx(ctx); + + /* + * For a SYNC request, send it to the hardware immediately. For an + * ASYNC request, just ensure that we run it later on. The latter + * allows for merging opportunities and more efficient dispatching. + */ + blk_mq_run_hw_queue(hctx, !is_sync); +} + +static void blk_mq_execute_end_io(struct request *rq, int error) +{ + struct completion *wait = rq->end_io_data; + + complete(wait); +} + +/* + * Insert request, pass to device, and wait for it to finish. 
+ */ +int blk_mq_execute_rq(struct request_queue *q, struct request *rq) +{ + DECLARE_COMPLETION_ONSTACK(wait); + unsigned long hang_check; + int err = 0; + + rq->end_io_data = &wait; + rq->end_io = blk_mq_execute_end_io; + blk_mq_insert_request(q, rq, true); + + /* Prevent hang_check timer from firing at us during very long I/O */ + hang_check = sysctl_hung_task_timeout_secs; + if (hang_check) + while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2))); + else + wait_for_completion_io(&wait); + + if (rq->errors) + err = -EIO; + + blk_mq_finish_request(rq, rq->errors); + + return err; +} +EXPORT_SYMBOL(blk_mq_execute_rq); + +/* + * Default mapping to a software queue, since we use one per CPU. + */ +struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} +EXPORT_SYMBOL(blk_mq_map_queue); + +struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *reg, + unsigned int hctx_index) +{ + return kmalloc_node(sizeof(struct blk_mq_hw_ctx), + GFP_KERNEL | __GFP_ZERO, reg->numa_node); +} +EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); + +void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx, + unsigned int hctx_index) +{ + kfree(hctx); +} +EXPORT_SYMBOL(blk_mq_free_single_hw_queue); + +static void blk_mq_hctx_notify(void *data, unsigned long action, + unsigned int cpu) +{ + struct blk_mq_hw_ctx *hctx = data; + struct blk_mq_ctx *ctx; + LIST_HEAD(tmp); + + if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) + return; + + /* + * Move ctx entries to new CPU, if this one is going away. + */ + ctx = __blk_mq_get_ctx(hctx->queue, cpu); + + spin_lock(&ctx->lock); + if (!list_empty(&ctx->rq_list)) { + list_splice_init(&ctx->rq_list, &tmp); + clear_bit(ctx->index_hw, hctx->ctx_map); + } + spin_unlock(&ctx->lock); + + if (list_empty(&tmp)) + return; + + ctx = blk_mq_get_ctx(hctx->queue); + spin_lock(&ctx->lock); + + while (!list_empty(&tmp)) { + struct request *rq; + + rq = list_first_entry(&tmp, struct request, queuelist); + rq->mq_ctx = ctx; + list_move_tail(&rq->queuelist, &ctx->rq_list); + } + + blk_mq_hctx_mark_pending(hctx, ctx); + + spin_unlock(&ctx->lock); + blk_mq_put_ctx(ctx); +} + +static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, + void (*init)(void *, struct blk_mq_hw_ctx *, + struct request *, unsigned int), + void *data) +{ + unsigned int i; + + for (i = 0; i < hctx->queue_depth; i++) { + struct request *rq = hctx->rqs[i]; + + init(data, hctx, rq, i); + } +} + +void blk_mq_init_commands(struct request_queue *q, + void (*init)(void *, struct blk_mq_hw_ctx *, + struct request *, unsigned int), + void *data) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_init_hw_commands(hctx, init, data); +} +EXPORT_SYMBOL(blk_mq_init_commands); + +static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx) +{ + struct page *page; + + while (!list_empty(&hctx->page_list)) { + page = list_first_entry(&hctx->page_list, struct page, list); + list_del_init(&page->list); + __free_pages(page, page->private); + } + + kfree(hctx->rqs); + + if (hctx->tags) + blk_mq_free_tags(hctx->tags); +} + +static size_t order_to_size(unsigned int order) +{ + size_t ret = PAGE_SIZE; + + while (order--) + ret *= 2; + + return ret; +} + +static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, + unsigned int reserved_tags, int node) +{ + unsigned int i, j, entries_per_page, max_order = 4; + size_t rq_size, left; + + INIT_LIST_HEAD(&hctx->page_list); + + hctx->rqs = 
kmalloc_node(hctx->queue_depth * sizeof(struct request *), + GFP_KERNEL, node); + if (!hctx->rqs) + return -ENOMEM; + + /* + * rq_size is the size of the request plus driver payload, rounded + * to the cacheline size + */ + rq_size = round_up(sizeof(struct request) + hctx->rq_pdu, + cache_line_size()); + left = rq_size * hctx->queue_depth; + + for (i = 0; i < hctx->queue_depth;) { + int this_order = max_order; + struct page *page; + int to_do; + void *p; + + while (left < order_to_size(this_order - 1) && this_order) + this_order--; + + do { + page = alloc_pages_node(node, GFP_KERNEL, this_order); + if (page) + break; + if (!this_order--) + break; + if (order_to_size(this_order) < rq_size) + break; + } while (1); + + if (!page) + break; + + page->private = this_order; + list_add_tail(&page->list, &hctx->page_list); + + p = page_address(page); + entries_per_page = order_to_size(this_order) / rq_size; + to_do = min(entries_per_page, hctx->queue_depth - i); + left -= to_do * rq_size; + for (j = 0; j < to_do; j++) { + hctx->rqs[i] = p; + blk_mq_rq_init(hctx, hctx->rqs[i]); + p += rq_size; + i++; + } + } + + if (!i) + return -ENOMEM; + else if (i != hctx->queue_depth) { + hctx->queue_depth = i; + pr_warn("%s: queue depth set to %u because of low memory\n", + __func__, i); + } + + hctx->tags = blk_mq_init_tags(hctx->queue_depth, reserved_tags, node); + if (!hctx->tags) { + blk_mq_free_rq_map(hctx); + return -ENOMEM; + } + + return 0; +} + +static int blk_mq_init_hw_queues(struct request_queue *q, + struct blk_mq_reg *reg, void *driver_data) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i, j; + + /* + * Initialize hardware queues + */ + queue_for_each_hw_ctx(q, hctx, i) { + unsigned int num_maps; + int node; + + node = hctx->numa_node; + if (node == NUMA_NO_NODE) + node = hctx->numa_node = reg->numa_node; + + INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn); + spin_lock_init(&hctx->lock); + INIT_LIST_HEAD(&hctx->dispatch); + hctx->queue = q; + hctx->queue_num = i; + hctx->flags = reg->flags; + hctx->queue_depth = reg->queue_depth; + hctx->rq_pdu = reg->rq_pdu; + + blk_mq_init_cpu_notifier(&hctx->cpu_notifier, + blk_mq_hctx_notify, hctx); + blk_mq_register_cpu_notifier(&hctx->cpu_notifier); + + if (blk_mq_init_rq_map(hctx, reg->reserved_tags, node)) + break; + + hctx->ctxs = kmalloc_node(hctx->nr_ctx * sizeof(void *), + GFP_KERNEL, node); + if (!hctx->ctxs) + break; + + num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG; + hctx->ctx_map = kmalloc_node(num_maps * sizeof(unsigned long), + GFP_KERNEL, node); + if (!hctx->ctx_map) + break; + + hctx->nr_ctx_map = num_maps; + hctx->nr_ctx = 0; + + if (reg->ops->init_hctx && + reg->ops->init_hctx(hctx, driver_data, i)) + break; + } + + if (i == q->nr_hw_queues) + return 0; + + /* + * Init failed + */ + queue_for_each_hw_ctx(q, hctx, j) { + if (i == j) + break; + + if (reg->ops->exit_hctx) + reg->ops->exit_hctx(hctx, j); + + blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); + blk_mq_free_rq_map(hctx); + kfree(hctx->ctxs); + } + + return 1; +} + +static void blk_mq_init_cpu_queues(struct request_queue *q, + unsigned int nr_hw_queues) +{ + unsigned int i; + + for_each_possible_cpu(i) { + struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); + struct blk_mq_hw_ctx *hctx; + + memset(__ctx, 0, sizeof(*__ctx)); + __ctx->cpu = i; + spin_lock_init(&__ctx->lock); + INIT_LIST_HEAD(&__ctx->rq_list); + __ctx->queue = q; + + if (!cpu_online(i)) + continue; + + hctx = q->mq_ops->map_queue(q, i); + hctx->nr_ctx++; + + /* + * Set local node, IFF we 
have more than one hw queue. If + * not, we remain on the home node of the device + */ + if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) + hctx->numa_node = cpu_to_node(i); + } +} + +struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, + void *driver_data) +{ + struct blk_mq_hw_ctx **hctxs; + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + struct request_queue *q; + int i; + + if (!reg->nr_hw_queues || !reg->ops->queue_rq || + !reg->ops->map_queue || !reg->ops->alloc_hctx || + !reg->ops->free_hctx) + return ERR_PTR(-EINVAL); + + if (!reg->queue_depth) + reg->queue_depth = BLK_MQ_MAX_DEPTH; + else if (reg->queue_depth > BLK_MQ_MAX_DEPTH) { + pr_err("blk-mq: queuedepth too large (%u)\n", reg->queue_depth); + reg->queue_depth = BLK_MQ_MAX_DEPTH; + } + + ctx = alloc_percpu(struct blk_mq_ctx); + if (!ctx) + return ERR_PTR(-ENOMEM); + + hctxs = kmalloc_node(reg->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, + reg->numa_node); + + if (!hctxs) + goto err_percpu; + + for (i = 0; i < reg->nr_hw_queues; i++) { + hctxs[i] = reg->ops->alloc_hctx(reg, i); + if (!hctxs[i]) + goto err_hctxs; + + hctxs[i]->numa_node = NUMA_NO_NODE; + hctxs[i]->queue_num = i; + } + + q = blk_alloc_queue_node(GFP_KERNEL, reg->numa_node); + if (!q) + goto err_hctxs; + + q->mq_map = blk_mq_make_queue_map(reg); + if (!q->mq_map) + goto err_map; + + setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); + blk_queue_rq_timeout(q, 30000); + + q->nr_queues = nr_cpu_ids; + q->nr_hw_queues = reg->nr_hw_queues; + + q->queue_ctx = ctx; + q->queue_hw_ctx = hctxs; + + q->mq_ops = reg->ops; + + blk_queue_make_request(q, blk_mq_make_request); + blk_queue_rq_timed_out(q, reg->ops->timeout); + if (reg->timeout) + blk_queue_rq_timeout(q, reg->timeout); + + blk_mq_init_cpu_queues(q, reg->nr_hw_queues); + + if (blk_mq_init_hw_queues(q, reg, driver_data)) + goto err_hw; + + /* + * Map software to hardware queues + */ + queue_for_each_ctx(q, ctx, i) { + if (!cpu_online(i)) { + ctx->index_hw = -1; + continue; + } + + hctx = q->mq_ops->map_queue(q, i); + ctx->index_hw = hctx->nr_ctx; + hctx->ctxs[hctx->nr_ctx++] = ctx; + } + + return q; +err_hw: + kfree(q->mq_map); +err_map: + blk_cleanup_queue(q); +err_hctxs: + for (i = 0; i < reg->nr_hw_queues; i++) { + if (!hctxs[i]) + break; + reg->ops->free_hctx(hctxs[i], i); + } + kfree(hctxs); +err_percpu: + free_percpu(ctx); + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(blk_mq_init_queue); + +void blk_mq_free_queue(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) { + cancel_delayed_work_sync(&hctx->delayed_work); + kfree(hctx->ctx_map); + kfree(hctx->ctxs); + blk_mq_free_rq_map(hctx); + blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); + q->mq_ops->free_hctx(hctx, i); + } + + free_percpu(q->queue_ctx); + kfree(q->queue_hw_ctx); + kfree(q->mq_map); + + q->queue_ctx = NULL; + q->queue_hw_ctx = NULL; + q->mq_map = NULL; +} +EXPORT_SYMBOL(blk_mq_free_queue); + +static int __init blk_mq_init(void) +{ + unsigned int i; + + for_each_possible_cpu(i) + init_llist_head(&per_cpu(ipi_lists, i)); + + blk_mq_cpu_init(); + return 0; +} +subsys_initcall(blk_mq_init); diff --git a/block/blk-mq.h b/block/blk-mq.h new file mode 100644 index 0000000..748ad06 --- /dev/null +++ b/block/blk-mq.h @@ -0,0 +1,48 @@ +#ifndef INT_BLK_MQ_H +#define INT_BLK_MQ_H + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + unsigned int 
ipi_redirect; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +}; + +void __blk_mq_end_io(struct request *rq, int error); + +/* + * CPU hotplug helpers + */ +struct blk_mq_cpu_notifier; +void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, + void (*fn)(void *, unsigned long, unsigned int), + void *data); +void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); +void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); +void blk_mq_cpu_init(void); +DECLARE_PER_CPU(struct llist_head, ipi_lists); + +/* + * CPU -> queue mappings + */ +struct blk_mq_reg; +extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg); +extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); + +void blk_mq_add_timer(struct request *rq); + +#endif diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 5efc5a6..6ecf8d3 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "blk.h" #include "blk-cgroup.h" @@ -542,6 +543,9 @@ static void blk_release_queue(struct kobject *kobj) if (q->queue_tags) __blk_queue_free_tags(q); + if (q->mq_ops) + blk_mq_free_queue(q); + blk_trace_shutdown(q); bdi_destroy(&q->backing_dev_info); @@ -588,6 +592,9 @@ int blk_register_queue(struct gendisk *disk) kobject_uevent(&q->kobj, KOBJ_ADD); + if (q->mq_ops) + blk_mq_register_disk(disk); + if (!q->request_fn) return 0; @@ -610,6 +617,9 @@ void blk_unregister_queue(struct gendisk *disk) if (WARN_ON(!q)) return; + if (q->mq_ops) + blk_mq_unregister_disk(disk); + if (q->request_fn) elv_unregister_queue(q); diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 6e4744c..2eda25a 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -7,6 +7,7 @@ #include #include "blk.h" +#include "blk-mq.h" #ifdef CONFIG_FAIL_IO_TIMEOUT @@ -87,11 +88,18 @@ static void blk_rq_timed_out(struct request *req) ret = q->rq_timed_out_fn(req); switch (ret) { case BLK_EH_HANDLED: - __blk_complete_request(req); + /* Can we use req->errors here? 
*/ + if (q->mq_ops) + __blk_mq_end_io(req, req->errors); + else + __blk_complete_request(req); break; case BLK_EH_RESET_TIMER: blk_clear_rq_complete(req); - blk_add_timer(req); + if (q->mq_ops) + blk_mq_add_timer(req); + else + blk_add_timer(req); break; case BLK_EH_NOT_HANDLED: /* @@ -107,6 +115,23 @@ static void blk_rq_timed_out(struct request *req) } } +void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, + unsigned int *next_set) +{ + if (time_after_eq(jiffies, rq->deadline)) { + list_del_init(&rq->timeout_list); + + /* + * Check if we raced with end io completion + */ + if (!blk_mark_rq_complete(rq)) + blk_rq_timed_out(rq); + } else if (!*next_set || time_after(*next_timeout, rq->deadline)) { + *next_timeout = rq->deadline; + *next_set = 1; + } +} + void blk_rq_timed_out_timer(unsigned long data) { struct request_queue *q = (struct request_queue *) data; @@ -116,21 +141,8 @@ void blk_rq_timed_out_timer(unsigned long data) spin_lock_irqsave(q->queue_lock, flags); - list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) { - if (time_after_eq(jiffies, rq->deadline)) { - list_del_init(&rq->timeout_list); - - /* - * Check if we raced with end io completion - */ - if (blk_mark_rq_complete(rq)) - continue; - blk_rq_timed_out(rq); - } else if (!next_set || time_after(next, rq->deadline)) { - next = rq->deadline; - next_set = 1; - } - } + list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) + blk_rq_check_expired(rq, &next, &next_set); if (next_set) mod_timer(&q->timeout, round_jiffies_up(next)); @@ -156,15 +168,7 @@ void blk_abort_request(struct request *req) } EXPORT_SYMBOL_GPL(blk_abort_request); -/** - * blk_add_timer - Start timeout timer for a single request - * @req: request that is about to start running. - * - * Notes: - * Each request has its own timer, and as it is added to the queue, we - * set up the timer. When the request completes, we cancel the timer. - */ -void blk_add_timer(struct request *req) +void __blk_add_timer(struct request *req, struct list_head *timeout_list) { struct request_queue *q = req->q; unsigned long expiry; @@ -183,7 +187,8 @@ void blk_add_timer(struct request *req) req->timeout = q->rq_timeout; req->deadline = jiffies + req->timeout; - list_add_tail(&req->timeout_list, &q->timeout_list); + if (timeout_list) + list_add_tail(&req->timeout_list, timeout_list); /* * If the timer isn't already pending or this timeout is earlier @@ -195,5 +200,19 @@ void blk_add_timer(struct request *req) if (!timer_pending(&q->timeout) || time_before(expiry, q->timeout.expires)) mod_timer(&q->timeout, expiry); + +} + +/** + * blk_add_timer - Start timeout timer for a single request + * @req: request that is about to start running. + * + * Notes: + * Each request has its own timer, and as it is added to the queue, we + * set up the timer. When the request completes, we cancel the timer. 
+ */ +void blk_add_timer(struct request *req) +{ + __blk_add_timer(req, &req->q->timeout_list); } diff --git a/block/blk.h b/block/blk.h index e837b8f..c90e1d8 100644 --- a/block/blk.h +++ b/block/blk.h @@ -10,6 +10,7 @@ #define BLK_BATCH_REQ 32 extern struct kmem_cache *blk_requestq_cachep; +extern struct kmem_cache *request_cachep; extern struct kobj_type blk_queue_ktype; extern struct ida blk_queue_ida; @@ -34,14 +35,30 @@ bool __blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes); void blk_rq_timed_out_timer(unsigned long data); +void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, + unsigned int *next_set); +void __blk_add_timer(struct request *req, struct list_head *timeout_list); void blk_delete_timer(struct request *); void blk_add_timer(struct request *); + +bool bio_attempt_front_merge(struct request_queue *q, struct request *req, + struct bio *bio); +bool bio_attempt_back_merge(struct request_queue *q, struct request *req, + struct bio *bio); +bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, + unsigned int *request_count); + +void blk_account_io_start(struct request *req, bool new_io); +void blk_account_io_completion(struct request *req, unsigned int bytes); +void blk_account_io_done(struct request *req); + /* * Internal atomic flags for request handling */ enum rq_atomic_flags { REQ_ATOM_COMPLETE = 0, + REQ_ATOM_STARTED, }; /* diff --git a/include/linux/bio.h b/include/linux/bio.h index 820e7aa..bcd785b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -435,6 +435,8 @@ static inline void bio_list_init(struct bio_list *bl) bl->head = bl->tail = NULL; } +#define BIO_EMPTY_LIST { NULL, NULL } + #define bio_list_for_each(bio, bl) \ for (bio = (bl)->head; bio; bio = bio->bi_next) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h new file mode 100644 index 0000000..49f7ad1 --- /dev/null +++ b/include/linux/blk-mq.h @@ -0,0 +1,183 @@ +#ifndef BLK_MQ_H +#define BLK_MQ_H + +#include + +struct blk_mq_tags; + +struct blk_mq_cpu_notifier { + struct list_head list; + void *data; + void (*notify)(void *data, unsigned long action, unsigned int cpu); +}; + +struct blk_mq_hw_ctx { + struct { + spinlock_t lock; + struct list_head dispatch; + } ____cacheline_aligned_in_smp; + + unsigned long state; /* BLK_MQ_S_* flags */ + struct delayed_work delayed_work; + + unsigned long flags; /* BLK_MQ_F_* flags */ + + struct request_queue *queue; + unsigned int queue_num; + + void *driver_data; + + unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + unsigned int nr_ctx_map; + unsigned long *ctx_map; + + struct request **rqs; + struct list_head page_list; + struct blk_mq_tags *tags; + + unsigned long queued; + unsigned long run; +#define BLK_MQ_MAX_DISPATCH_ORDER 10 + unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; + + unsigned int queue_depth; + unsigned int numa_node; + unsigned int rq_pdu; /* per-request extra data */ + + struct blk_mq_cpu_notifier cpu_notifier; + struct kobject kobj; +}; + +struct blk_mq_reg { + struct blk_mq_ops *ops; + unsigned int nr_hw_queues; + unsigned int queue_depth; + unsigned int reserved_tags; + unsigned int rq_pdu; /* per-request extra data */ + int numa_node; + unsigned int timeout; + unsigned int flags; /* BLK_MQ_F_* */ +}; + +typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); +typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); +typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int); 
+typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); +typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); + +struct blk_mq_ops { + /* + * Queue request + */ + queue_rq_fn *queue_rq; + + /* + * Map to specific hardware queue + */ + map_queue_fn *map_queue; + + /* + * Called on request timeout + */ + rq_timed_out_fn *timeout; + + /* + * Override for hctx allocations (should probably go) + */ + alloc_hctx_fn *alloc_hctx; + free_hctx_fn *free_hctx; + + /* + * Called when the block layer side of a hardware queue has been + * set up, allowing the driver to allocate/init matching structures. + * Ditto for exit/teardown. + */ + init_hctx_fn *init_hctx; + exit_hctx_fn *exit_hctx; +}; + +enum { + BLK_MQ_RQ_QUEUE_OK = 0, /* queued fine */ + BLK_MQ_RQ_QUEUE_BUSY = 1, /* requeue IO for later */ + BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */ + + BLK_MQ_F_SHOULD_MERGE = 1 << 0, + BLK_MQ_F_SHOULD_SORT = 1 << 1, + BLK_MQ_F_SHOULD_IPI = 1 << 2, + + BLK_MQ_S_STOPPED = 1 << 0, + + BLK_MQ_MAX_DEPTH = 2048, +}; + +struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *); +void blk_mq_free_queue(struct request_queue *); +int blk_mq_register_disk(struct gendisk *); +void blk_mq_unregister_disk(struct gendisk *); +void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data); + +void blk_mq_flush_plug(struct request_queue *, bool); +void blk_mq_insert_request(struct request_queue *, struct request *, bool); +void blk_mq_insert_requests(struct request_queue *, struct list_head *, bool, bool); +int blk_mq_execute_rq(struct request_queue *, struct request *); +void blk_mq_run_queues(struct request_queue *q, bool async); +void blk_mq_free_request(struct request *rq); +bool blk_mq_can_queue(struct blk_mq_hw_ctx *); +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp); +struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); +struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag); + +struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); +struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int); +void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); + +void blk_mq_end_io(struct request *rq, int error); + +void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); +void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); +void blk_mq_start_stopped_hw_queues(struct request_queue *q); + +/* + * Driver command data is immediately after the request. So subtract request + * size to get back to the original request. 
+ */ +static inline struct request *blk_mq_rq_from_pdu(void *pdu) +{ + return pdu - sizeof(struct request); +} +static inline void *blk_mq_rq_to_pdu(struct request *rq) +{ + return (void *) rq + sizeof(*rq); +} + +static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, + unsigned int tag) +{ + return hctx->rqs[tag]; +} + +#define queue_for_each_hw_ctx(q, hctx, i) \ + for ((i) = 0, hctx = (q)->queue_hw_ctx[0]; \ + (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i]) + +#define queue_for_each_ctx(q, ctx, i) \ + for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0); \ + (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i))) + +#define hctx_for_each_ctx(hctx, ctx, i) \ + for ((i) = 0, ctx = (hctx)->ctxs[0]; \ + (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)]) + +#define blk_ctx_sum(q, sum) \ +({ \ + struct blk_mq_ctx *__x; \ + unsigned int __ret = 0, __i; \ + \ + queue_for_each_ctx((q), __x, __i) \ + __ret += sum; \ + __ret; \ +}) + +#endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index dca43e5..0b241fc 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -175,6 +175,7 @@ enum rq_flag_bits { __REQ_IO_STAT, /* account I/O stat */ __REQ_MIXED_MERGE, /* merge of different types, fail separately */ __REQ_KERNEL, /* direct IO to kernel pages */ + __REQ_END, /* last of chain of requests */ __REQ_NR_BITS, /* stops here */ }; @@ -223,5 +224,6 @@ enum rq_flag_bits { #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) #define REQ_SECURE (1ULL << __REQ_SECURE) #define REQ_KERNEL (1ULL << __REQ_KERNEL) +#define REQ_END (1ULL << __REQ_END) #endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 50ec02c..04af125 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -94,10 +95,14 @@ enum rq_cmd_type_bits { * as well! */ struct request { - struct list_head queuelist; + union { + struct list_head queuelist; + struct llist_node ll_list; + }; struct call_single_data csd; struct request_queue *q; + struct blk_mq_ctx *mq_ctx; u64 cmd_flags; enum rq_cmd_type_bits cmd_type; @@ -215,6 +220,8 @@ struct request_pm_state #include +struct blk_queue_ctx; + typedef void (request_fn_proc) (struct request_queue *q); typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); typedef int (prep_rq_fn) (struct request_queue *, struct request *); @@ -313,6 +320,18 @@ struct request_queue { dma_drain_needed_fn *dma_drain_needed; lld_busy_fn *lld_busy_fn; + struct blk_mq_ops *mq_ops; + + unsigned int *mq_map; + + /* sw queues */ + struct blk_mq_ctx *queue_ctx; + unsigned int nr_queues; + + /* hw dispatch queues */ + struct blk_mq_hw_ctx **queue_hw_ctx; + unsigned int nr_hw_queues; + /* * Dispatch queue sorting */ @@ -362,6 +381,11 @@ struct request_queue { struct kobject kobj; /* + * mq queue kobject + */ + struct kobject mq_kobj; + + /* * queue settings */ unsigned long nr_requests; /* Max # of requests */ @@ -1287,6 +1311,7 @@ static inline void put_dev_sector(Sector p) struct work_struct; int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); +int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); #ifdef CONFIG_BLK_CGROUP /* -- 1.7.4.4
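
As a rough illustration of how a driver hooks into the interface added above (every mydrv_* name and structure below is made up, not part of the patch), registration boils down to filling in a blk_mq_reg and a blk_mq_ops and calling blk_mq_init_queue(). The only hooks blk_mq_init_queue() insists on are queue_rq, map_queue, alloc_hctx and free_hctx; the exported defaults cover the latter three for the common single hardware queue case:

static struct blk_mq_ops mydrv_mq_ops = {
        .queue_rq       = mydrv_queue_rq,               /* sketched below */
        .map_queue      = blk_mq_map_queue,             /* default CPU -> hw queue map */
        .alloc_hctx     = blk_mq_alloc_single_hw_queue,
        .free_hctx      = blk_mq_free_single_hw_queue,
        .init_hctx      = mydrv_init_hctx,              /* optional, sketched below */
        .timeout        = mydrv_timeout,                /* optional, sketched below */
};

static struct blk_mq_reg mydrv_mq_reg = {
        .ops            = &mydrv_mq_ops,
        .nr_hw_queues   = 1,
        .queue_depth    = 64,
        .rq_pdu         = sizeof(struct mydrv_cmd),     /* per-request driver payload */
        .numa_node      = NUMA_NO_NODE,
        .flags          = BLK_MQ_F_SHOULD_MERGE,
};

        /* in the probe path; mydrv is the driver's device structure */
        struct request_queue *q;

        q = blk_mq_init_queue(&mydrv_mq_reg, mydrv);
        if (IS_ERR(q))
                return PTR_ERR(q);

Teardown goes through the usual blk_cleanup_queue()/release path; the blk-sysfs.c hunk above has blk_release_queue() call blk_mq_free_queue() whenever q->mq_ops is set.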
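
queue_rq is handed fully set up requests; the driver payload sized by reg->rq_pdu sits directly behind each struct request and is reached with blk_mq_rq_to_pdu(). A sketch of the submit and complete sides, with the hardware-facing mydrv_* calls left hypothetical and hctx->driver_data assumed to have been filled in by init_hctx:

static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct mydrv_dev *dev = hctx->driver_data;
        struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

        if (!mydrv_hw_has_room(dev)) {
                /*
                 * No hardware slot free: stop the queue and let blk-mq
                 * hold the request until we restart things.
                 */
                blk_mq_stop_hw_queue(hctx);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        if (mydrv_submit(dev, cmd, rq))
                return BLK_MQ_RQ_QUEUE_ERROR;   /* blk-mq ends the IO with an error */

        return BLK_MQ_RQ_QUEUE_OK;
}

/* completion side, e.g. from the driver's interrupt handler */
static void mydrv_complete(struct mydrv_cmd *cmd, int error)
{
        struct request *rq = blk_mq_rq_from_pdu(cmd);

        blk_mq_end_io(rq, error);

        /* a slot just freed up; restart anything we stopped above */
        blk_mq_start_stopped_hw_queues(rq->q);
}

Note that blk_mq_start_stopped_hw_queues() runs the queues inline through __blk_mq_run_hw_queue(), so a driver completing from hard interrupt context may prefer to defer the restart.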
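
init_hctx is the natural place to bind a hardware queue to its driver-side context, and blk_mq_init_commands() walks every preallocated request once so per-request setup is paid at init time rather than in the IO path. Again hypothetical, with the mydrv_cmd fields invented for illustration:

static int mydrv_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                           unsigned int hctx_index)
{
        struct mydrv_dev *dev = data;   /* driver_data from blk_mq_init_queue() */

        hctx->driver_data = mydrv_hw_queue(dev, hctx_index);
        return 0;
}

static void mydrv_init_cmd(void *data, struct blk_mq_hw_ctx *hctx,
                           struct request *rq, unsigned int index)
{
        struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->dev = data;
        cmd->index = index;
}

        /* after a successful blk_mq_init_queue() */
        blk_mq_init_commands(q, mydrv_init_cmd, mydrv);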
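
The timeout hook feeds the reworked blk_rq_timed_out() above: returning BLK_EH_RESET_TIMER re-arms the request through blk_mq_add_timer(), while BLK_EH_HANDLED completes it through __blk_mq_end_io() with whatever sits in req->errors (the patch itself flags that convention as an open question). An illustrative handler, with the in-flight check hypothetical:

static enum blk_eh_timer_return mydrv_timeout(struct request *rq)
{
        struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

        if (mydrv_cmd_still_queued(cmd))
                return BLK_EH_RESET_TIMER;      /* give the hardware more time */

        rq->errors = -EIO;                      /* picked up by __blk_mq_end_io() */
        return BLK_EH_HANDLED;
}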
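
The default blk_mq_map_queue() above simply follows the map built by blk_mq_make_queue_map(); for an N:M setup a driver can supply its own map_queue instead, as long as the result is stable for a given CPU, since the init loop above uses it to wire each software queue to one hardware queue. A purely illustrative round-robin spread:

static struct blk_mq_hw_ctx *mydrv_map_queue(struct request_queue *q,
                                             const int cpu)
{
        /* spread the per-CPU software queues evenly over the hw queues */
        return q->queue_hw_ctx[cpu % q->nr_hw_queues];
}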