From eaf61a7659f87df54f5197a5784a8bb041031e68 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 9 May 2014 17:03:16 -0400
Subject: [RHEL6.6 PATCH 09/11] dm crypt: store crypt_config instead of dm_target struct

BZ: 1076147

Hand-patched the last hunk that modifies crypt_map() because RHEL6's
dm-crypt doesn't have discard support.

Upstream commit 49a8a9204bb17296725058bbc7f31092d256be6e
Author: Alasdair G Kergon
Date:   Fri Jul 27 15:08:05 2012 +0100

    dm crypt: store crypt_config instead of dm_target struct

    Store the crypt_config struct pointer directly in struct dm_crypt_io
    instead of the dm_target struct pointer.  Target information is never
    used - only target->private is referenced, thus we can change it to
    point directly to struct crypt_config.

    Signed-off-by: Mikulas Patocka
    Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-crypt.c | 38 ++++++++++++++++++--------------------
 1 file changed, 18 insertions(+), 20 deletions(-)

Index: rhel6-compile/drivers/md/dm-crypt.c
===================================================================
--- rhel6-compile.orig/drivers/md/dm-crypt.c	2014-05-10 16:55:13.000000000 +0200
+++ rhel6-compile/drivers/md/dm-crypt.c	2014-05-10 16:55:15.000000000 +0200
@@ -47,7 +47,7 @@ struct convert_context {
  * per bio private data
  */
 struct dm_crypt_io {
-	struct dm_target *target;
+	struct crypt_config *cc;
 	struct bio *base_bio;
 	struct work_struct work;
 
@@ -618,7 +618,7 @@ static int crypt_convert(struct crypt_co
 static void dm_crypt_bio_destructor(struct bio *bio)
 {
 	struct dm_crypt_io *io = bio->bi_private;
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	bio_free(bio, cc->bs);
 }
@@ -632,7 +632,7 @@ static void dm_crypt_bio_destructor(stru
 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
 				      unsigned *out_of_pages)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
@@ -691,14 +691,13 @@ static void crypt_free_buffer_pages(stru
 	}
 }
 
-static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
 					  struct bio *bio, sector_t sector)
 {
-	struct crypt_config *cc = ti->private;
 	struct dm_crypt_io *io;
 
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	io->target = ti;
+	io->cc = cc;
 	io->base_bio = bio;
 	io->sector = sector;
 	io->error = 0;
@@ -720,7 +719,7 @@ static void crypt_inc_pending(struct dm_
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
 	struct dm_crypt_io *base_io = io->base_io;
 	int error = io->error;
@@ -759,7 +758,7 @@ static void crypt_dec_pending(struct dm_
 static void crypt_endio(struct bio *clone, int error)
 {
 	struct dm_crypt_io *io = clone->bi_private;
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	unsigned rw = bio_data_dir(clone);
 
 	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
@@ -786,7 +785,7 @@ static void crypt_endio(struct bio *clon
 
 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	clone->bi_private = io;
 	clone->bi_end_io  = crypt_endio;
@@ -802,7 +801,7 @@ static void kcryptd_unplug(struct crypt_
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
@@ -852,7 +851,7 @@ static void kcryptd_io(struct work_struc
 
 static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	INIT_WORK(&io->work, kcryptd_io);
 	queue_work(cc->io_queue, &io->work);
@@ -861,7 +860,7 @@ static void kcryptd_queue_io(struct dm_c
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
 	struct bio *clone = io->ctx.bio_out;
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	if (unlikely(io->error < 0)) {
 		crypt_free_buffer_pages(cc, clone);
@@ -883,7 +882,7 @@ static void kcryptd_crypt_write_io_submi
 
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	struct bio *clone;
 	struct dm_crypt_io *new_io;
 	int crypt_finished;
@@ -949,7 +948,7 @@ static void kcryptd_crypt_write_convert(
 	 * between fragments, so switch to a new dm_crypt_io structure.
 	 */
 	if (unlikely(!crypt_finished && remaining)) {
-		new_io = crypt_io_alloc(io->target, io->base_bio,
+		new_io = crypt_io_alloc(io->cc, io->base_bio,
 					sector);
 		crypt_inc_pending(new_io);
 		crypt_convert_init(cc, &new_io->ctx, NULL,
@@ -983,7 +982,7 @@ static void kcryptd_crypt_read_done(stru
 
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 	int r = 0;
 
 	crypt_inc_pending(io);
@@ -1007,7 +1006,7 @@ static void kcryptd_async_done(struct cr
 	struct dm_crypt_request *dmreq = async_req->data;
 	struct convert_context *ctx = dmreq->ctx;
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	if (error == -EINPROGRESS) {
 		complete(&ctx->restart);
@@ -1043,7 +1042,7 @@ static void kcryptd_crypt(struct work_st
 
 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
-	struct crypt_config *cc = io->target->private;
+	struct crypt_config *cc = io->cc;
 
 	INIT_WORK(&io->work, kcryptd_crypt);
 	queue_work(cc->crypt_queue, &io->work);
@@ -1517,7 +1516,7 @@ static int crypt_map(struct dm_target *t
 		     union map_info *map_context)
 {
 	struct dm_crypt_io *io;
-	struct crypt_config *cc;
+	struct crypt_config *cc = ti->private;
 
 	/*
 	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
@@ -1525,14 +1524,13 @@ static int crypt_map(struct dm_target *t
 	 * - for REQ_DISCARD caller must use flush if IO ordering matters
 	 */
 	if (unlikely(bio->bi_rw & (BIO_FLUSH | BIO_DISCARD))) {
-		cc = ti->private;
 		bio->bi_bdev = cc->dev->bdev;
 		if (bio_sectors(bio))
 			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
 		return DM_MAPIO_REMAPPED;
 	}
 
-	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
+	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
 
 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))
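
Note (not part of the patch): the access-pattern change can be seen in a
standalone sketch.  The structs and the key_size member below are made-up,
userspace-compilable stand-ins, not the kernel definitions; only the pointer
indirection mirrors the change above.

    /* Simplified stand-ins for illustration only. */
    struct crypt_config { int key_size; };                  /* made-up member */
    struct dm_target { void *private; /* opaque per-target data */ };

    /* Before: the per-I/O struct points at the target... */
    struct io_old { struct dm_target *target; };

    static int keysize_old(struct io_old *io)
    {
            /* ...so every access pays a double dereference through
             * the untyped target->private pointer. */
            struct crypt_config *cc = io->target->private;
            return cc->key_size;
    }

    /* After: the crypt_config pointer is stored once, at allocation time
     * (crypt_io_alloc() in the patch). */
    struct io_new { struct crypt_config *cc; };

    static int keysize_new(struct io_new *io)
    {
            return io->cc->key_size; /* single dereference, typed pointer */
    }

Besides saving a dereference, struct dm_crypt_io no longer carries any
dependency on struct dm_target, which the commit message notes was otherwise
unused.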