dm-crypt: small changes

Small changes:
- bio_in and base_bio are always the same, so bio_in can be removed
- simplify the arguments of crypt_convert_init
- remove the gfp_t parameter from kcryptd_io_read because it is always
  GFP_NOIO
- remove the "cc" parameter from crypt_alloc_req because the value can be
  obtained from io->cc
- the rest of the patch only moves functions around without changing any
  logic

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm-crypt.c |  182 ++++++++++++++++++++------------------------------
 1 file changed, 76 insertions(+), 106 deletions(-)

Index: linux-3.2-fast/drivers/md/dm-crypt.c
===================================================================
--- linux-3.2-fast.orig/drivers/md/dm-crypt.c	2012-02-10 09:08:21.000000000 +0100
+++ linux-3.2-fast/drivers/md/dm-crypt.c	2012-02-10 09:08:30.000000000 +0100
@@ -42,7 +42,6 @@ struct dm_crypt_io {
 	struct bio *base_bio;
 	struct work_struct work;
 
-	struct bio *bio_in;
 	struct bio *bio_out;
 	unsigned int offset_in;
 	unsigned int offset_out;
@@ -559,7 +558,7 @@ static int crypt_iv_lmk_gen(struct crypt
 	u8 *src;
 	int r = 0;
 
-	if (bio_data_dir(dmreq->io->bio_in) == WRITE) {
+	if (bio_data_dir(dmreq->io->base_bio) == WRITE) {
 		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
 		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
 		kunmap_atomic(src, KM_USER0);
@@ -575,7 +574,7 @@ static int crypt_iv_lmk_post(struct cryp
 	u8 *dst;
 	int r;
 
-	if (bio_data_dir(dmreq->io->bio_in) == WRITE)
+	if (bio_data_dir(dmreq->io->base_bio) == WRITE)
 		return 0;
 
 	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
@@ -624,18 +623,15 @@ static struct crypt_iv_operations crypt_
 	.post = crypt_iv_lmk_post
 };
 
-static void crypt_convert_init(struct crypt_config *cc,
-			       struct dm_crypt_io *io,
-			       struct bio *bio_out, struct bio *bio_in,
-			       sector_t sector)
+static void crypt_convert_init(struct dm_crypt_io *io, struct bio *bio_out)
 {
-	io->bio_in = bio_in;
+	struct crypt_config *cc = io->cc;
 	io->bio_out = bio_out;
 	io->offset_in = 0;
 	io->offset_out = 0;
-	io->idx_in = bio_in ? bio_in->bi_idx : 0;
-	io->idx_out = bio_out ? bio_out->bi_idx : 0;
-	io->cc_sector = sector + cc->iv_offset;
+	io->idx_in = io->base_bio->bi_idx;
+	io->idx_out = bio_out->bi_idx;
+	io->cc_sector = io->sector + cc->iv_offset;
 }
 
 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
@@ -714,7 +710,7 @@ pop_from_list:
 		int r;
 		DECLARE_COMPLETION(busy_wait);
 		dmreq->busy_wait = &busy_wait;
-		if (bio_data_dir(dmreq->io->bio_in) == WRITE)
+		if (bio_data_dir(dmreq->io->base_bio) == WRITE)
 			r = crypto_ablkcipher_encrypt(req);
 		else
 			r = crypto_ablkcipher_decrypt(req);
@@ -730,12 +726,53 @@ pop_from_list:
 	return 0;
 }
 
-static int crypt_convert_block(struct crypt_config *cc,
-			       struct dm_crypt_io *io,
+static struct ablkcipher_request *crypt_alloc_req(struct dm_crypt_io *io,
+						  gfp_t gfp_mask)
+{
+	struct crypt_config *cc = io->cc;
+	unsigned key_index = io->cc_sector & (cc->tfms_count - 1);
+	struct ablkcipher_request *req = mempool_alloc(cc->req_pool, gfp_mask);
+	if (!req)
+		return NULL;
+
+	ablkcipher_request_set_tfm(req, cc->tfms[key_index]);
+	ablkcipher_request_set_callback(req,
+		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+		kcryptd_async_done, dmreq_of_req(cc, req));
+
+	return req;
+}
+
+static void crypt_flush_batch(struct crypt_config *cc, struct list_head *batch)
+{
+	spin_lock_irq(&cc->crypt_thread_wait.lock);
+	list_splice_tail(batch, &cc->crypt_thread_list);
+	wake_up_locked(&cc->crypt_thread_wait);
+	spin_unlock_irq(&cc->crypt_thread_wait.lock);
+	INIT_LIST_HEAD(batch);
+
+}
+
+static void crypt_end_io(struct dm_crypt_io *io);
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io);
+
+static void crypt_dec_cc_pending(struct dm_crypt_io *io)
+{
+	if (!atomic_dec_and_test(&io->cc_pending))
+		return;
+
+	if (bio_data_dir(io->base_bio) == READ)
+		crypt_end_io(io);
+	else
+		kcryptd_crypt_write_io_submit(io);
+}
+
+static int crypt_convert_block(struct dm_crypt_io *io,
 			       struct ablkcipher_request *req,
 			       struct list_head *batch)
 {
-	struct bio_vec *bv_in = bio_iovec_idx(io->bio_in, io->idx_in);
+	struct crypt_config *cc = io->cc;
+	struct bio_vec *bv_in = bio_iovec_idx(io->base_bio, io->idx_in);
 	struct bio_vec *bv_out = bio_iovec_idx(io->bio_out, io->idx_out);
 	struct dm_crypt_request *dmreq;
 	u8 *iv;
@@ -780,52 +817,12 @@ static int crypt_convert_block(struct cr
 	return 0;
 }
 
-static struct ablkcipher_request *crypt_alloc_req(struct crypt_config *cc,
-						  struct dm_crypt_io *io, gfp_t gfp_mask)
-{
-	unsigned key_index = io->cc_sector & (cc->tfms_count - 1);
-	struct ablkcipher_request *req = mempool_alloc(cc->req_pool, gfp_mask);
-	if (!req)
-		return NULL;
-
-	ablkcipher_request_set_tfm(req, cc->tfms[key_index]);
-	ablkcipher_request_set_callback(req,
-		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-		kcryptd_async_done, dmreq_of_req(cc, req));
-
-	return req;
-}
-
-static void crypt_flush_batch(struct crypt_config *cc, struct list_head *batch)
-{
-	spin_lock_irq(&cc->crypt_thread_wait.lock);
-	list_splice_tail(batch, &cc->crypt_thread_list);
-	wake_up_locked(&cc->crypt_thread_wait);
-	spin_unlock_irq(&cc->crypt_thread_wait.lock);
-	INIT_LIST_HEAD(batch);
-
-}
-
-static void crypt_end_io(struct dm_crypt_io *io);
-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async);
-
-static void crypt_dec_cc_pending(struct dm_crypt_io *io)
-{
-	if (!atomic_dec_and_test(&io->cc_pending))
-		return;
-
-	if (bio_data_dir(io->base_bio) == READ)
-		crypt_end_io(io);
-	else
-		kcryptd_crypt_write_io_submit(io, 1);
-}
-
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
-static void crypt_convert(struct crypt_config *cc,
-			  struct dm_crypt_io *io)
+static void crypt_convert(struct dm_crypt_io *io)
 {
+	struct crypt_config *cc = io->cc;
 	LIST_HEAD(batch);
 	unsigned batch_count = 0;
 
@@ -833,7 +830,7 @@ static void crypt_convert(struct crypt_c
 	while (1) {
 		int r;
 
-		struct ablkcipher_request *req = crypt_alloc_req(cc, io, GFP_NOWAIT);
+		struct ablkcipher_request *req = crypt_alloc_req(io, GFP_NOWAIT);
 		if (!req) {
 			/*
 			 * We must flush our request queue before we attempt
@@ -841,10 +838,10 @@ static void crypt_convert(struct crypt_c
 			 */
 			batch_count = 0;
 			crypt_flush_batch(cc, &batch);
-			req = crypt_alloc_req(cc, io, GFP_NOIO);
+			req = crypt_alloc_req(io, GFP_NOIO);
 		}
 
-		r = crypt_convert_block(cc, io, req, &batch);
+		r = crypt_convert_block(io, req, &batch);
 		if (unlikely(r < 0)) {
 			crypt_flush_batch(cc, &batch);
 			io->error = -EIO;
@@ -854,7 +851,7 @@ static void crypt_convert(struct crypt_c
 
 		io->cc_sector++;
 
-		if (io->idx_in < io->bio_in->bi_vcnt &&
+		if (io->idx_in < io->base_bio->bi_vcnt &&
 		    io->idx_out < io->bio_out->bi_vcnt) {
 			atomic_inc(&io->cc_pending);
 			if (unlikely(++batch_count >= DMREQ_PUSH_BATCH)) {
@@ -1041,7 +1038,7 @@ static void clone_init(struct dm_crypt_i
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
+static int kcryptd_io_read(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
@@ -1052,7 +1049,7 @@ static int kcryptd_io_read(struct dm_cry
 	 * copy the required bvecs because we need the original
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
-	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
 	if (!clone)
 		return 1;
 
@@ -1068,31 +1065,11 @@ static int kcryptd_io_read(struct dm_cry
 	return 0;
 }
 
-static void kcryptd_io_write(struct dm_crypt_io *io)
-{
-	struct bio *clone = io->bio_out;
-	generic_make_request(clone);
-}
-
-static void kcryptd_io(struct work_struct *work)
+static void kcryptd_io_write(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-
-	kcryptd_io_write(io);
-}
-
-static void kcryptd_queue_io(struct dm_crypt_io *io)
-{
 	struct crypt_config *cc = io->cc;
-
-	INIT_WORK(&io->work, kcryptd_io);
-	queue_work(cc->io_queue, &io->work);
-}
-
-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
-{
 	struct bio *clone = io->bio_out;
-	struct crypt_config *cc = io->cc;
 
 	if (unlikely(io->error < 0)) {
 		crypt_free_buffer_pages(cc, clone);
@@ -1106,45 +1083,38 @@ static void kcryptd_crypt_write_io_submi
 
 	clone->bi_sector = cc->start + io->sector;
 
-	if (async)
-		kcryptd_queue_io(io);
-	else
-		generic_make_request(clone);
+	generic_make_request(clone);
 }
 
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
-	struct bio *clone;
-	unsigned remaining = io->base_bio->bi_size;
-	sector_t sector = io->sector;
 
-	crypt_convert_init(cc, io, NULL, io->base_bio, sector);
+	INIT_WORK(&io->work, kcryptd_io_write);
+	queue_work(cc->io_queue, &io->work);
+}
 
-	clone = crypt_alloc_buffer(io, remaining);
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+{
+	struct bio *clone;
+
+	clone = crypt_alloc_buffer(io, io->base_bio->bi_size);
 	if (unlikely(!clone)) {
 		io->error = -ENOMEM;
 		crypt_end_io(io);
 		return;
 	}
 
-	io->bio_out = clone;
-	io->idx_out = 0;
+	crypt_convert_init(io, clone);
 
-	remaining -= clone->bi_size;
-	sector += bio_sectors(clone);
-
-	crypt_convert(cc, io);
+	crypt_convert(io);
 }
 
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->cc;
-
-	crypt_convert_init(cc, io, io->base_bio, io->base_bio,
-			   io->sector);
+	crypt_convert_init(io, io->base_bio);
 
-	crypt_convert(cc, io);
+	crypt_convert(io);
 }
 
 static void kcryptd_async_done(struct crypto_async_request *async_req,
@@ -1718,7 +1688,7 @@ static int crypt_map(struct dm_target *t
 	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
 
 	if (bio_data_dir(io->base_bio) == READ) {
-		kcryptd_io_read(io);
+		kcryptd_io_read(io);
 	} else {
 		kcryptd_crypt_write_convert(io);
 	}
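
Not part of the patch: below is a minimal, self-contained userspace sketch of the calling-convention change (deriving the crypt_config from the io back-pointer instead of passing it as a separate argument). The types, fields and values are hypothetical stand-ins used purely for illustration; they are not the kernel structures.

/* Illustrative sketch only -- hypothetical userspace stand-ins, not dm-crypt code. */
#include <stdio.h>

struct crypt_config {
	long iv_offset;
};

struct dm_crypt_io {
	struct crypt_config *cc;	/* back-pointer, like io->cc in the patch */
	long sector;
	long cc_sector;
};

/* Before: caller passes both cc and io, although io already knows its cc. */
static void convert_init_old(struct crypt_config *cc, struct dm_crypt_io *io,
			     long sector)
{
	io->cc_sector = sector + cc->iv_offset;
}

/* After: only io is passed; cc and the sector are derived from it. */
static void convert_init_new(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	io->cc_sector = io->sector + cc->iv_offset;
}

int main(void)
{
	struct crypt_config cc = { .iv_offset = 8 };
	struct dm_crypt_io io = { .cc = &cc, .sector = 100 };

	convert_init_old(&cc, &io, io.sector);
	printf("old: cc_sector=%ld\n", io.cc_sector);

	convert_init_new(&io);
	printf("new: cc_sector=%ld\n", io.cc_sector);

	return 0;
}

Passing only the io keeps the call sites shorter and makes it impossible for a caller to hand in a cc or sector that does not match the io.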