dm-crypt: retain write ordering

This patch retains the ordering of outgoing write requests: the requests
are submitted to the underlying device in the same order in which they
were received.

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm-crypt.c | 47 ++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 40 insertions(+), 7 deletions(-)

Index: linux-3.2-fast/drivers/md/dm-crypt.c
===================================================================
--- linux-3.2-fast.orig/drivers/md/dm-crypt.c	2012-02-10 09:08:44.000000000 +0100
+++ linux-3.2-fast/drivers/md/dm-crypt.c	2012-02-10 09:08:52.000000000 +0100
@@ -49,6 +49,8 @@ struct dm_crypt_io {
 	sector_t sector;
 	struct list_head list;
+
+	u64 sequence;
 };
 
 struct dm_crypt_request {
@@ -123,6 +125,9 @@ struct crypt_config {
 	wait_queue_head_t write_thread_wait;
 	struct list_head write_thread_list;
 
+	u64 write_sequence;
+	atomic64_t alloc_sequence;
+
 	char *cipher;
 	char *cipher_string;
@@ -1095,6 +1100,7 @@ static int dmcrypt_write(void *data)
 	struct crypt_config *cc = data;
 	while (1) {
 		struct list_head local_list;
+		unsigned spinlock_breaker;
 		struct blk_plug plug;
 
 		DECLARE_WAITQUEUE(wait, current);
@@ -1124,20 +1130,35 @@ continue_locked:
 			goto continue_locked;
 
 pop_from_list:
-		local_list = cc->write_thread_list;
-		local_list.next->prev = &local_list;
-		local_list.prev->next = &local_list;
-		INIT_LIST_HEAD(&cc->write_thread_list);
+		INIT_LIST_HEAD(&local_list);
+		spinlock_breaker = 0;
+		do {
+			struct dm_crypt_io *io = container_of(
+						cc->write_thread_list.next,
+						struct dm_crypt_io, list);
+
+			BUG_ON(io->sequence < cc->write_sequence);
+			if (io->sequence != cc->write_sequence)
+				break;
+			cc->write_sequence++;
+
+			list_del(&io->list);
+			list_add_tail(&io->list, &local_list);
+			if (unlikely(!(++spinlock_breaker & 63))) {
+				spin_unlock_irq(&cc->write_thread_wait.lock);
+				spin_lock_irq(&cc->write_thread_wait.lock);
+			}
+		} while (!list_empty(&cc->write_thread_list));
 		spin_unlock_irq(&cc->write_thread_wait.lock);
 
 		blk_start_plug(&plug);
-		do {
+		while (!list_empty(&local_list)) {
 			struct dm_crypt_io *io = container_of(local_list.next,
 					struct dm_crypt_io, list);
 			list_del(&io->list);
 			kcryptd_io_write(io);
-		} while (!list_empty(&local_list));
+		}
 		blk_finish_plug(&plug);
 	}
 	return 0;
@@ -1147,10 +1168,18 @@ static void kcryptd_crypt_write_io_submi
 {
 	struct crypt_config *cc = io->cc;
 	unsigned long flags;
+	struct dm_crypt_io *io_list;
 
 	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
-	list_add_tail(&io->list, &cc->write_thread_list);
+	list_for_each_entry_reverse(io_list, &cc->write_thread_list, list) {
+		if (io_list->sequence < io->sequence) {
+			list_add(&io->list, &io_list->list);
+			goto added;
+		}
+	}
+	list_add(&io->list, &cc->write_thread_list);
 	wake_up_locked(&cc->write_thread_wait);
+added:
 	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
 }
 
@@ -1165,6 +1194,8 @@ static void kcryptd_crypt_write_convert(
 		return;
 	}
 
+	io->sequence = atomic64_inc_return(&io->cc->alloc_sequence) - 1;
+
 	crypt_convert(io, clone);
 }
@@ -1718,6 +1749,8 @@ static int crypt_ctr(struct dm_target *t
 
 	init_waitqueue_head(&cc->write_thread_wait);
 	INIT_LIST_HEAD(&cc->write_thread_list);
+	cc->write_sequence = 0;
+	atomic64_set(&cc->alloc_sequence, 0);
 
 	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
 	if (IS_ERR(cc->write_thread)) {