dm-crypt: simplify cc_pending

This patch removes one extra cc_pending reference from crypt_convert.
Now, cc_pending represents the real number of pending operations.

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm-crypt.c |   61 +++++++++++++++++++++++---------------------------
 1 file changed, 29 insertions(+), 32 deletions(-)

Index: linux-3.2-fast/drivers/md/dm-crypt.c
===================================================================
--- linux-3.2-fast.orig/drivers/md/dm-crypt.c	2012-02-18 17:11:30.000000000 +0100
+++ linux-3.2-fast/drivers/md/dm-crypt.c	2012-02-18 17:11:33.000000000 +0100
@@ -817,6 +817,20 @@ static void crypt_flush_batch(struct cry
 
 }
 
+static void crypt_dec_pending(struct dm_crypt_io *io);
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async);
+
+static void crypt_dec_cc_pending(struct dm_crypt_io *io)
+{
+	if (!atomic_dec_and_test(&io->ctx.cc_pending))
+		return;
+
+	if (bio_data_dir(io->base_bio) == READ)
+		crypt_dec_pending(io);
+	else
+		kcryptd_crypt_write_io_submit(io, 1);
+}
+
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
@@ -829,9 +843,7 @@ static int crypt_convert(struct crypt_co
 
 	atomic_set(&ctx->cc_pending, 1);
 
-	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
-	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
-
+	while (1) {
 		struct ablkcipher_request *req = crypt_alloc_req(cc, ctx, GFP_NOWAIT);
 		if (!req) {
 			/*
@@ -843,25 +855,30 @@ static int crypt_convert(struct crypt_co
 			req = crypt_alloc_req(cc, ctx, GFP_NOIO);
 		}
 
-		atomic_inc(&ctx->cc_pending);
-
 		r = crypt_convert_block(cc, ctx, req, &batch);
 		if (unlikely(r < 0)) {
-			atomic_dec(&ctx->cc_pending);
-			goto flush_ret;
+			crypt_flush_batch(cc, &batch);
+			crypt_dec_cc_pending(container_of(ctx, struct dm_crypt_io, ctx));
+			goto ret;
 		}
 
 		ctx->sector++;
 
-		if (unlikely(++batch_count >= DMREQ_PUSH_BATCH)) {
-			batch_count = 0;
-			crypt_flush_batch(cc, &batch);
+		if (ctx->idx_in < ctx->bio_in->bi_vcnt &&
+		    ctx->idx_out < ctx->bio_out->bi_vcnt) {
+			atomic_inc(&ctx->cc_pending);
+			if (unlikely(++batch_count >= DMREQ_PUSH_BATCH)) {
+				batch_count = 0;
+				crypt_flush_batch(cc, &batch);
+			}
+			continue;
 		}
+		break;
 	}
 
 	r = 0;
-flush_ret:
 	crypt_flush_batch(cc, &batch);
+ret:
 	return r;
 }
 
@@ -1124,7 +1141,6 @@ static void kcryptd_crypt_write_convert(
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *clone;
-	int crypt_finished;
 	unsigned remaining = io->base_bio->bi_size;
 	sector_t sector = io->sector;
 	int r;
@@ -1151,21 +1167,11 @@ static void kcryptd_crypt_write_convert(
 	r = crypt_convert(cc, &io->ctx);
 	if (r)
 		io->error = -EIO;
-	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-	/* Encryption was already finished, submit io now */
-	if (crypt_finished)
-		kcryptd_crypt_write_io_submit(io, 0);
 
 dec:
 	crypt_dec_pending(io);
 }
 
-static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
-{
-	crypt_dec_pending(io);
-}
-
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
@@ -1181,9 +1187,6 @@ static void kcryptd_crypt_read_convert(s
 	if (r < 0)
 		io->error = -EIO;
 
-	if (atomic_dec_and_test(&io->ctx.cc_pending))
-		kcryptd_crypt_read_done(io);
-
 	crypt_dec_pending(io);
 }
 
@@ -1208,13 +1211,7 @@ static void kcryptd_async_done(struct cr
 
 	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
-	if (!atomic_dec_and_test(&ctx->cc_pending))
-		return;
-
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_crypt_read_done(io);
-	else
-		kcryptd_crypt_write_io_submit(io, 1);
+	crypt_dec_cc_pending(io);
 }
 
 static void kcryptd_crypt(struct work_struct *work)
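
Note (illustration only, not part of the patch): the counting scheme after this change boils down to "initialize cc_pending to 1 for the first block, take one extra reference only when another block will follow, and let a single helper drop references and fire the completion when the count reaches zero." The standalone userspace sketch below models just that pattern with C11 atomics; the names pending_ctx, complete_io, dec_cc_pending and process_blocks are invented for the example and only the reference-counting shape mirrors the patched crypt_convert()/crypt_dec_cc_pending(). In the sketch the extra reference is taken before the current block "completes", which stands in for the way the real code only pushes the batched requests to the crypto layer after the increment.

/*
 * Userspace sketch of the cc_pending counting scheme introduced by this
 * patch, using C11 atomics instead of the kernel's atomic_t.  All
 * identifiers are invented for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pending_ctx {
	atomic_int cc_pending;		/* number of blocks still in flight */
};

/* Runs exactly once, when the last in-flight block completes. */
static void complete_io(struct pending_ctx *ctx)
{
	(void)ctx;
	printf("all pending operations finished\n");
}

/* Rough counterpart of crypt_dec_cc_pending(): drop one reference. */
static void dec_cc_pending(struct pending_ctx *ctx)
{
	if (atomic_fetch_sub(&ctx->cc_pending, 1) == 1)
		complete_io(ctx);
}

/* Rough counterpart of crypt_convert(): one reference per block. */
static void process_blocks(struct pending_ctx *ctx, int nr_blocks)
{
	atomic_store(&ctx->cc_pending, 1);	/* reference for the first block */

	for (int i = 0; i < nr_blocks; i++) {
		bool more = (i + 1 < nr_blocks);

		/* Take the next block's reference before this one can finish. */
		if (more)
			atomic_fetch_add(&ctx->cc_pending, 1);

		/* This sketch "completes" each block synchronously. */
		dec_cc_pending(ctx);

		if (!more)
			break;
	}
}

int main(void)
{
	struct pending_ctx ctx;

	process_blocks(&ctx, 4);	/* prints the completion message once */
	return 0;
}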