dm-crypt: merge convert_context and dm_crypt_io

There is a one-to-one relationship between convert_context and dm_crypt_io,
so we can merge these structures into one and simplify the code.

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm-crypt.c |  120 +++++++++++++++++++++++---------------------------
 1 file changed, 56 insertions(+), 64 deletions(-)

Index: linux-3.2-fast/drivers/md/dm-crypt.c
===================================================================
--- linux-3.2-fast.orig/drivers/md/dm-crypt.c	2012-02-10 09:07:57.000000000 +0100
+++ linux-3.2-fast/drivers/md/dm-crypt.c	2012-02-10 09:08:05.000000000 +0100
@@ -35,9 +35,13 @@
 #define DMREQ_PUSH_BATCH 16
 
 /*
- * context holding the current state of a multi-part conversion
+ * per bio private data
  */
-struct convert_context {
+struct dm_crypt_io {
+	struct crypt_config *cc;
+	struct bio *base_bio;
+	struct work_struct work;
+
 	struct bio *bio_in;
 	struct bio *bio_out;
 	unsigned int offset_in;
@@ -46,17 +50,6 @@ struct convert_context {
 	unsigned int idx_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
-};
-
-/*
- * per bio private data
- */
-struct dm_crypt_io {
-	struct crypt_config *cc;
-	struct bio *base_bio;
-	struct work_struct work;
-
-	struct convert_context ctx;
 
 	atomic_t io_pending;
 	int error;
@@ -65,7 +58,7 @@ struct dm_crypt_io {
 
 struct dm_crypt_request {
 	struct list_head list;
-	struct convert_context *ctx;
+	struct dm_crypt_io *io;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
 	sector_t iv_sector;
@@ -567,7 +560,7 @@ static int crypt_iv_lmk_gen(struct crypt
 	u8 *src;
 	int r = 0;
 
-	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+	if (bio_data_dir(dmreq->io->bio_in) == WRITE) {
 		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
 		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
 		kunmap_atomic(src, KM_USER0);
@@ -583,7 +576,7 @@ static int crypt_iv_lmk_post(struct cryp
 	u8 *dst;
 	int r;
 
-	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
+	if (bio_data_dir(dmreq->io->bio_in) == WRITE)
 		return 0;
 
 	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
@@ -633,17 +626,17 @@ static struct crypt_iv_operations crypt_
 };
 
 static void crypt_convert_init(struct crypt_config *cc,
-			       struct convert_context *ctx,
+			       struct dm_crypt_io *io,
 			       struct bio *bio_out, struct bio *bio_in,
 			       sector_t sector)
 {
-	ctx->bio_in = bio_in;
-	ctx->bio_out = bio_out;
-	ctx->offset_in = 0;
-	ctx->offset_out = 0;
-	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
-	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
-	ctx->cc_sector = sector + cc->iv_offset;
+	io->bio_in = bio_in;
+	io->bio_out = bio_out;
+	io->offset_in = 0;
+	io->offset_out = 0;
+	io->idx_in = bio_in ? bio_in->bi_idx : 0;
+	io->idx_out = bio_out ? bio_out->bi_idx : 0;
+	io->cc_sector = sector + cc->iv_offset;
 }
 
 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
@@ -722,7 +715,7 @@ pop_from_list:
 			int r;
 			DECLARE_COMPLETION(busy_wait);
 			dmreq->busy_wait = &busy_wait;
-			if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
+			if (bio_data_dir(dmreq->io->bio_in) == WRITE)
 				r = crypto_ablkcipher_encrypt(req);
 			else
 				r = crypto_ablkcipher_decrypt(req);
@@ -739,12 +732,12 @@ pop_from_list:
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
-			       struct convert_context *ctx,
+			       struct dm_crypt_io *io,
 			       struct ablkcipher_request *req,
 			       struct list_head *batch)
 {
-	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+	struct bio_vec *bv_in = bio_iovec_idx(io->bio_in, io->idx_in);
+	struct bio_vec *bv_out = bio_iovec_idx(io->bio_out, io->idx_out);
 	struct dm_crypt_request *dmreq;
 	u8 *iv;
 	int r;
@@ -752,26 +745,26 @@ static int crypt_convert_block(struct cr
 	dmreq = dmreq_of_req(cc, req);
 	iv = iv_of_dmreq(cc, dmreq);
 
-	dmreq->iv_sector = ctx->cc_sector;
-	dmreq->ctx = ctx;
+	dmreq->iv_sector = io->cc_sector;
+	dmreq->io = io;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
-		    bv_in->bv_offset + ctx->offset_in);
+		    bv_in->bv_offset + io->offset_in);
 
 	sg_init_table(&dmreq->sg_out, 1);
 	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
-		    bv_out->bv_offset + ctx->offset_out);
+		    bv_out->bv_offset + io->offset_out);
 
-	ctx->offset_in += 1 << SECTOR_SHIFT;
-	if (ctx->offset_in >= bv_in->bv_len) {
-		ctx->offset_in = 0;
-		ctx->idx_in++;
+	io->offset_in += 1 << SECTOR_SHIFT;
+	if (io->offset_in >= bv_in->bv_len) {
+		io->offset_in = 0;
+		io->idx_in++;
 	}
 
-	ctx->offset_out += 1 << SECTOR_SHIFT;
-	if (ctx->offset_out >= bv_out->bv_len) {
-		ctx->offset_out = 0;
-		ctx->idx_out++;
+	io->offset_out += 1 << SECTOR_SHIFT;
+	if (io->offset_out >= bv_out->bv_len) {
+		io->offset_out = 0;
+		io->idx_out++;
 	}
 
 	if (cc->iv_gen_ops) {
@@ -789,9 +782,9 @@ static int crypt_convert_block(struct cr
 }
 
 static struct ablkcipher_request *crypt_alloc_req(struct crypt_config *cc,
-					     struct convert_context *ctx, gfp_t gfp_mask)
+					     struct dm_crypt_io *io, gfp_t gfp_mask)
 {
-	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
+	unsigned key_index = io->cc_sector & (cc->tfms_count - 1);
 	struct ablkcipher_request *req = mempool_alloc(cc->req_pool, gfp_mask);
 	if (!req)
 		return NULL;
@@ -819,7 +812,7 @@ static void kcryptd_crypt_write_io_submi
 
 static void crypt_dec_cc_pending(struct dm_crypt_io *io)
 {
-	if (!atomic_dec_and_test(&io->ctx.cc_pending))
+	if (!atomic_dec_and_test(&io->cc_pending))
 		return;
 
 	if (bio_data_dir(io->base_bio) == READ)
@@ -832,16 +825,16 @@ static void crypt_dec_cc_pending(struct
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static int crypt_convert(struct crypt_config *cc,
-			 struct convert_context *ctx)
+			 struct dm_crypt_io *io)
 {
 	int r;
 	LIST_HEAD(batch);
 	unsigned batch_count = 0;
 
-	atomic_set(&ctx->cc_pending, 1);
+	atomic_set(&io->cc_pending, 1);
 
 	while (1) {
-		struct ablkcipher_request *req = crypt_alloc_req(cc, ctx, GFP_NOWAIT);
+		struct ablkcipher_request *req = crypt_alloc_req(cc, io, GFP_NOWAIT);
 		if (!req) {
 			/*
 			 * We must flush our request queue before we attempt
@@ -849,21 +842,21 @@ static int crypt_convert(struct crypt_co
 			 */
 			batch_count = 0;
 			crypt_flush_batch(cc, &batch);
-			req = crypt_alloc_req(cc, ctx, GFP_NOIO);
+			req = crypt_alloc_req(cc, io, GFP_NOIO);
 		}
 
-		r = crypt_convert_block(cc, ctx, req, &batch);
+		r = crypt_convert_block(cc, io, req, &batch);
 		if (unlikely(r < 0)) {
 			crypt_flush_batch(cc, &batch);
-			crypt_dec_cc_pending(container_of(ctx, struct dm_crypt_io, ctx));
+			crypt_dec_cc_pending(io);
 			goto ret;
 		}
 
-		ctx->cc_sector++;
+		io->cc_sector++;
 
-		if (ctx->idx_in < ctx->bio_in->bi_vcnt &&
-		    ctx->idx_out < ctx->bio_out->bi_vcnt) {
-			atomic_inc(&ctx->cc_pending);
+		if (io->idx_in < io->bio_in->bi_vcnt &&
+		    io->idx_out < io->bio_out->bi_vcnt) {
+			atomic_inc(&io->cc_pending);
 			if (unlikely(++batch_count >= DMREQ_PUSH_BATCH)) {
 				batch_count = 0;
 				crypt_flush_batch(cc, &batch);
@@ -1092,7 +1085,7 @@ static int kcryptd_io_read(struct dm_cry
 
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
-	struct bio *clone = io->ctx.bio_out;
+	struct bio *clone = io->bio_out;
 
 	generic_make_request(clone);
 }
@@ -1113,7 +1106,7 @@ static void kcryptd_queue_io(struct dm_c
 
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
-	struct bio *clone = io->ctx.bio_out;
+	struct bio *clone = io->bio_out;
 	struct crypt_config *cc = io->cc;
 
 	if (unlikely(io->error < 0)) {
@@ -1124,7 +1117,7 @@ static void kcryptd_crypt_write_io_submi
 	}
 
 	/* crypt_convert should have filled the clone bio */
-	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+	BUG_ON(io->idx_out < clone->bi_vcnt);
 
 	clone->bi_sector = cc->start + io->sector;
 
@@ -1146,7 +1139,7 @@ static void kcryptd_crypt_write_convert(
 	 * Prevent io from disappearing until this function completes.
 	 */
 	crypt_inc_pending(io);
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
+	crypt_convert_init(cc, io, NULL, io->base_bio, sector);
 
 	clone = crypt_alloc_buffer(io, remaining);
 	if (unlikely(!clone)) {
@@ -1154,14 +1147,14 @@ static void kcryptd_crypt_write_convert(
 		goto dec;
 	}
 
-	io->ctx.bio_out = clone;
-	io->ctx.idx_out = 0;
+	io->bio_out = clone;
+	io->idx_out = 0;
 
 	remaining -= clone->bi_size;
 	sector += bio_sectors(clone);
 
 	crypt_inc_pending(io);
 
-	r = crypt_convert(cc, &io->ctx);
+	r = crypt_convert(cc, io);
 	if (r)
 		io->error = -EIO;
@@ -1176,10 +1169,10 @@ static void kcryptd_crypt_read_convert(s
 
 	crypt_inc_pending(io);
 
-	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+	crypt_convert_init(cc, io, io->base_bio, io->base_bio,
 			   io->sector);
 
-	r = crypt_convert(cc, &io->ctx);
+	r = crypt_convert(cc, io);
 	if (r < 0)
 		io->error = -EIO;
 
@@ -1191,8 +1184,7 @@ static void kcryptd_async_done(struct cr
 			       int error)
 {
 	struct dm_crypt_request *dmreq = async_req->data;
-	struct convert_context *ctx = dmreq->ctx;
-	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+	struct dm_crypt_io *io = dmreq->io;
 	struct crypt_config *cc = io->cc;
 
 	if (error == -EINPROGRESS) {
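
The main simplification shows up in the completion path: kcryptd_async_done()
now reaches the per-bio structure directly from the request instead of going
through container_of(). A minimal before/after sketch of that lookup
(fragments taken from the last hunk above, not a complete function):

	/* before: recover dm_crypt_io from the embedded convert_context */
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	/* after: use the pointer stored by crypt_convert_block() (dmreq->io = io) */
	struct dm_crypt_io *io = dmreq->io;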