---
 drivers/md/dm-crypt.c |  107 ++++++++++++++++++++++++++++++--------------------
 1 file changed, 66 insertions(+), 41 deletions(-)

Index: linux-3.2-fast/drivers/md/dm-crypt.c
===================================================================
--- linux-3.2-fast.orig/drivers/md/dm-crypt.c	2012-01-31 19:26:27.000000000 +0100
+++ linux-3.2-fast/drivers/md/dm-crypt.c	2012-01-31 20:58:08.000000000 +0100
@@ -96,6 +96,12 @@ struct iv_lmk_private {
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
+struct dm_crypt_thread {
+	struct crypt_config *cc;
+	unsigned number;
+	struct task_struct *thread;
+};
+
 /*
  * The fields in here must be read only after initialization.
  */
@@ -114,10 +120,14 @@ struct crypt_config {
 	struct workqueue_struct *io_queue;
 
 	unsigned crypt_threads_size;
-	struct task_struct **crypt_threads;
+	struct dm_crypt_thread *crypt_threads;
 	wait_queue_head_t crypt_thread_wait;
 	struct list_head crypt_thread_list;
+	unsigned long crypt_thread_list_number;
+
+	wait_queue_head_t crypt_thread_parking;
+	unsigned crypt_threads_active;
 
 	struct task_struct *write_thread;
 	wait_queue_head_t write_thread_wait;
@@ -648,7 +658,8 @@ static void kcryptd_async_done(struct cr
 
 static int dmcrypt_thread(void *data)
 {
-	struct crypt_config *cc = data;
+	struct dm_crypt_thread *ct = data;
+	struct crypt_config *cc = ct->cc;
 	while (1) {
 		struct dm_crypt_request *dmreqs[DMREQ_PULL_BATCH];
 		unsigned n_dmreqs;
@@ -659,6 +670,24 @@ static int dmcrypt_thread(void *data)
 
 		spin_lock_irq(&cc->crypt_thread_wait.lock);
 continue_locked:
+		if (unlikely(ct->number >= cc->crypt_threads_active)) {
+			__set_current_state(TASK_INTERRUPTIBLE);
+			add_wait_queue(&cc->crypt_thread_parking, &wait);
+			spin_unlock_irq(&cc->crypt_thread_wait.lock);
+
+			if (kthread_should_stop()) {
+				set_task_state(current, TASK_RUNNING);
+				remove_wait_queue(&cc->crypt_thread_parking, &wait);
+				break;
+			}
+
+			schedule();
+
+			set_task_state(current, TASK_RUNNING);
+			remove_wait_queue(&cc->crypt_thread_parking, &wait);
+			continue;
+		}
+
 		if (!list_empty(&cc->crypt_thread_list))
 			goto pop_from_list;
 
@@ -688,6 +717,7 @@ pop_from_list:
 				struct dm_crypt_request, list);
 			list_del(&dmreq->list);
 			dmreqs[n_dmreqs++] = dmreq;
+			cc->crypt_thread_list_number--;
 		} while (n_dmreqs < DMREQ_PULL_BATCH &&
 			 !list_empty(&cc->crypt_thread_list));
 
@@ -743,14 +773,25 @@ static struct ablkcipher_request *crypt_
 	return req;
 }
 
+static unsigned long maxnum = 0;
+
 static void crypt_flush_batch(struct crypt_config *cc, struct list_head *batch)
 {
+	unsigned long entries = 0;
+	struct list_head *pos;
+
+	list_for_each(pos, batch)
+		entries++;
+
 	spin_lock_irq(&cc->crypt_thread_wait.lock);
 	list_splice_tail(batch, &cc->crypt_thread_list);
+	cc->crypt_thread_list_number += entries;
+	if (unlikely(cc->crypt_thread_list_number > maxnum)) {
+		maxnum = cc->crypt_thread_list_number;
+	}
 	wake_up_locked(&cc->crypt_thread_wait);
 	spin_unlock_irq(&cc->crypt_thread_wait.lock);
 	INIT_LIST_HEAD(batch);
-
 }
 
 static void crypt_end_io(struct dm_crypt_io *io);
@@ -1352,8 +1393,8 @@ static void crypt_dtr(struct dm_target *
 	if (cc->crypt_threads) {
 		int i;
 		for (i = 0; i < cc->crypt_threads_size; i++) {
-			if (cc->crypt_threads[i])
-				kthread_stop(cc->crypt_threads[i]);
+			if (cc->crypt_threads[i].thread)
+				kthread_stop(cc->crypt_threads[i].thread);
 		}
 		kfree(cc->crypt_threads);
 	}
@@ -1662,53 +1703,36 @@ static int crypt_ctr(struct dm_target *t
 		goto bad;
 	}
 
-	if (num_threads == num_online_cpus()) {
-		for (i = 0; i < NR_CPUS; i++)
-			if (cpu_online(i))
-				cc->crypt_threads_size = i + 1;
-	} else {
-		if (num_threads > INT_MAX / sizeof(struct task_struct *))
-			num_threads = INT_MAX / sizeof(struct task_struct *);
-		cc->crypt_threads_size = num_threads;
-	}
+	if (num_threads > INT_MAX / sizeof(struct dm_crypt_thread))
+		num_threads = INT_MAX / sizeof(struct dm_crypt_thread);
+	cc->crypt_threads_size = num_threads;
 
 	init_waitqueue_head(&cc->crypt_thread_wait);
 	INIT_LIST_HEAD(&cc->crypt_thread_list);
+	cc->crypt_thread_list_number = 0;
+
+	init_waitqueue_head(&cc->crypt_thread_parking);
+	cc->crypt_threads_active = 3;
 
 	cc->crypt_threads = kzalloc(cc->crypt_threads_size *
-				    sizeof(struct task_struct *), GFP_KERNEL);
+				    sizeof(struct dm_crypt_thread), GFP_KERNEL);
 	if (!cc->crypt_threads) {
 		ti->error = "Couldn't allocate crypt threads";
 		goto bad;
 	}
 
-	if (num_threads == num_online_cpus())
-		for (i = 0; i < cc->crypt_threads_size; i++) {
-			if (cpu_online(i)) {
-				cc->crypt_threads[i] = kthread_create_on_node(
-					dmcrypt_thread, cc, cpu_to_node(i),
-					"dmcryptd/%d", i);
-				if (IS_ERR(cc->crypt_threads[i])) {
-					ret = PTR_ERR(cc->crypt_threads[i]);
-					cc->crypt_threads[i] = NULL;
-					ti->error = "Couldn't spawn thread";
-					goto bad;
-				}
-				kthread_bind(cc->crypt_threads[i], i);
-				wake_up_process(cc->crypt_threads[i]);
-			}
-	} else {
-		for (i = 0; i < cc->crypt_threads_size; i++) {
-			cc->crypt_threads[i] = kthread_create(
-				dmcrypt_thread, cc, "dmcryptd/%d", i);
-			if (IS_ERR(cc->crypt_threads[i])) {
-				ret = PTR_ERR(cc->crypt_threads[i]);
-				cc->crypt_threads[i] = NULL;
-				ti->error = "Couldn't spawn thread";
-				goto bad;
-			}
-			wake_up_process(cc->crypt_threads[i]);
+	for (i = 0; i < cc->crypt_threads_size; i++) {
+		cc->crypt_threads[i].cc = cc;
+		cc->crypt_threads[i].number = i;
+		cc->crypt_threads[i].thread = kthread_create(dmcrypt_thread,
+			&cc->crypt_threads[i], "dmcryptd/%d", i);
+		if (IS_ERR(cc->crypt_threads[i].thread)) {
+			ret = PTR_ERR(cc->crypt_threads[i].thread);
+			cc->crypt_threads[i].thread = NULL;
+			ti->error = "Couldn't spawn thread";
+			goto bad;
 		}
+		wake_up_process(cc->crypt_threads[i].thread);
 	}
 
 	init_waitqueue_head(&cc->write_thread_wait);
@@ -1923,6 +1947,7 @@ static int __init dm_crypt_init(void)
 
 static void __exit dm_crypt_exit(void)
 {
+	printk("maxnum was %lu\n", maxnum);
 	dm_unregister_target(&crypt_target);
 	kmem_cache_destroy(_crypt_io_pool);
 }
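
Below, for illustration only, is a small user-space sketch of the same queueing scheme: a producer splices a privately built batch onto a shared list under one lock (as crypt_flush_batch() does above), the list length and its observed maximum are tracked (the crypt_thread_list_number / maxnum counters), and worker threads whose number is at or above an "active" limit park on a separate wait primitive (the crypt_thread_parking / crypt_threads_active pair). It uses pthreads, and every name in it (work_item, pool_flush_batch, nr_active, ...) is made up for the example; it only approximates the patch's logic and is not kernel code.

/*
 * User-space analogue of the queueing scheme in this patch (illustration
 * only, made-up names): a producer splices a private batch onto a shared
 * list under one lock (cf. crypt_flush_batch), the queue depth and its
 * maximum are tracked (cf. crypt_thread_list_number / maxnum), and workers
 * numbered >= nr_active park on a second condvar (cf. crypt_thread_parking).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct work_item {
	int payload;
	struct work_item *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_avail = PTHREAD_COND_INITIALIZER; /* ~crypt_thread_wait */
static pthread_cond_t parking = PTHREAD_COND_INITIALIZER;    /* ~crypt_thread_parking */
static struct work_item *queue_head, *queue_tail;
static unsigned long queue_len, max_queue_len;
static unsigned nr_active = 3;                                /* ~crypt_threads_active */
static int stopping;

/* Publish a locally built batch in one locked operation. */
static void pool_flush_batch(struct work_item *first, struct work_item *last,
			     unsigned long entries)
{
	pthread_mutex_lock(&lock);
	if (queue_tail)
		queue_tail->next = first;
	else
		queue_head = first;
	queue_tail = last;
	queue_len += entries;
	if (queue_len > max_queue_len)
		max_queue_len = queue_len;
	pthread_cond_broadcast(&work_avail);
	pthread_mutex_unlock(&lock);
}

/* Worker loop, loosely mirroring dmcrypt_thread(). */
static void *worker(void *arg)
{
	unsigned number = (unsigned)(unsigned long)arg;

	pthread_mutex_lock(&lock);
	while (!stopping) {
		if (number >= nr_active) {	/* parked: above the active limit */
			pthread_cond_wait(&parking, &lock);
			continue;
		}
		if (!queue_head) {
			pthread_cond_wait(&work_avail, &lock);
			continue;
		}
		struct work_item *w = queue_head;
		queue_head = w->next;
		if (!queue_head)
			queue_tail = NULL;
		queue_len--;
		pthread_mutex_unlock(&lock);
		printf("worker %u handled item %d\n", number, w->payload);
		free(w);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	enum { NR_THREADS = 4, BATCH = 8 };
	pthread_t tid[NR_THREADS];
	struct work_item *first = NULL, *last = NULL;
	unsigned i;

	for (i = 0; i < NR_THREADS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)(unsigned long)i);

	for (i = 0; i < BATCH; i++) {		/* build a private batch... */
		struct work_item *w = calloc(1, sizeof(*w));
		w->payload = i;
		if (last)
			last->next = w;
		else
			first = w;
		last = w;
	}
	pool_flush_batch(first, last, BATCH);	/* ...and flush it in one go */

	sleep(1);				/* let the active workers drain the queue */
	pthread_mutex_lock(&lock);
	stopping = 1;
	pthread_cond_broadcast(&work_avail);
	pthread_cond_broadcast(&parking);
	pthread_mutex_unlock(&lock);
	for (i = 0; i < NR_THREADS; i++)
		pthread_join(tid[i], NULL);
	printf("max queue length was %lu\n", max_queue_len);
	return 0;
}

With 4 threads and nr_active = 3, the fourth worker stays parked and never prints, which mirrors what the hardcoded cc->crypt_threads_active = 3 does to the extra dmcryptd threads in the patch.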