dm kcopyd: call copy offload with asynchronous callback

Change dm kcopyd so that it calls blkdev_issue_copy with an asynchronous
callback. There can be a large number of pending kcopyd requests and
holding a process context for each of them may put too much load on the
workqueue subsystem.

This patch changes it so that blkdev_issue_copy returns after it
submitted the requests and copy_offload_callback is called when the copy
operation finishes.

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm-kcopyd.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)

Index: linux-4.11-rc2/drivers/md/dm-kcopyd.c
===================================================================
--- linux-4.11-rc2.orig/drivers/md/dm-kcopyd.c
+++ linux-4.11-rc2/drivers/md/dm-kcopyd.c
@@ -361,8 +361,6 @@ struct kcopyd_job {
 	sector_t progress;
 
 	struct kcopyd_job *master_job;
-
-	struct work_struct copy_work;
 };
 
 static struct kmem_cache *_job_cache;
@@ -629,8 +627,9 @@ static void segment_complete(int read_er
 	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
 	struct kcopyd_job *job = sub_job->master_job;
 	struct dm_kcopyd_client *kc = job->kc;
+	unsigned long flags;
 
-	spin_lock(&job->lock);
+	spin_lock_irqsave(&job->lock, flags);
 
 	/* update the error */
 	if (read_err)
@@ -654,7 +653,7 @@ static void segment_complete(int read_er
 			job->progress += count;
 		}
 	}
-	spin_unlock(&job->lock);
+	spin_unlock_irqrestore(&job->lock, flags);
 
 	if (count) {
 		int i;
@@ -715,28 +714,25 @@ static void submit_job(struct kcopyd_job
 	}
 }
 
-static void copy_offload_work(struct work_struct *work)
+static void copy_offload_callback(void *ptr, int error)
 {
-	struct kcopyd_job *job = container_of(work, struct kcopyd_job, copy_work);
-	sector_t copied;
+	struct kcopyd_job *job = ptr;
 
-	blkdev_issue_copy(job->source.bdev, job->source.sector,
-			  job->dests[0].bdev, job->dests[0].sector,
-			  job->source.count, &copied, NULL, NULL,
-			  GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
-
-	job->source.sector += copied;
-	job->source.count -= copied;
-	job->dests[0].sector += copied;
-	job->dests[0].count -= copied;
+	job->source.sector += job->progress;
+	job->source.count -= job->progress;
+	job->dests[0].sector += job->progress;
+	job->dests[0].count -= job->progress;
 
 	submit_job(job);
 }
 
 static void try_copy_offload(struct kcopyd_job *job)
 {
-	INIT_WORK(&job->copy_work, copy_offload_work);
-	queue_work(job->kc->kcopyd_wq, &job->copy_work);
+	blkdev_issue_copy(job->source.bdev, job->source.sector,
+			  job->dests[0].bdev, job->dests[0].sector,
+			  job->source.count, &job->progress,
+			  copy_offload_callback, job,
+			  GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 }
 
 int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,