From: Herbert Xu This patch breaks up the read/write processing so that the crypto operations can complete asynchronously. Signed-off-by: Herbert Xu Signed-off-by: Milan Broz --- drivers/md/dm-crypt.c | 169 +++++++++++++++++++++++++++++++++----------------- 1 files changed, 112 insertions(+), 57 deletions(-) Index: linux-2.6.23-rc6/drivers/md/dm-crypt.c =================================================================== --- linux-2.6.23-rc6.orig/drivers/md/dm-crypt.c 2007-09-25 22:12:20.000000000 +0100 +++ linux-2.6.23-rc6/drivers/md/dm-crypt.c 2007-09-25 22:12:21.000000000 +0100 @@ -28,17 +28,6 @@ #define MESG_STR(x) x, sizeof(x) /* - * per bio private data - */ -struct dm_crypt_io { - struct dm_target *target; - struct bio *base_bio; - struct work_struct work; - atomic_t pending; - int error; -}; - -/* * context holding the current state of a multi-part conversion */ struct convert_context { @@ -50,6 +39,23 @@ struct convert_context { unsigned int idx_out; sector_t sector; int write; + int err; +}; + +/* + * per bio private data + */ +struct dm_crypt_io { + struct dm_target *target; + struct bio *base_bio; + struct work_struct work; + + struct convert_context ctx; + + atomic_t pending; + int error; + unsigned remaining; + sector_t sector; }; struct crypt_config; @@ -321,6 +327,13 @@ crypt_convert_scatterlist(struct crypt_c return r; } +static void dec_pending(struct dm_crypt_io *io, int error); + +static void crypt_read_done(struct dm_crypt_io *io) +{ + dec_pending(io, io->ctx.err); +} + static void crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, struct bio *bio_out, struct bio *bio_in, @@ -339,8 +352,7 @@ crypt_convert_init(struct crypt_config * /* * Encrypt / decrypt data from one bio to another one (can be the same one) */ -static int crypt_convert(struct crypt_config *cc, - struct convert_context *ctx) +static void crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { int r = 0; @@ -372,14 +384,14 @@ static int 
crypt_convert(struct crypt_co } r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length, - ctx->write, ctx->sector); + ctx->write, ctx->sector); if (r < 0) break; ctx->sector++; } - return r; + ctx->err = r; } static void dm_crypt_bio_destructor(struct bio *bio) @@ -606,72 +618,113 @@ static void process_read(struct dm_crypt generic_make_request(clone); } -static void process_write(struct dm_crypt_io *io) +static void crypt_write_loop(struct dm_crypt_io *io); + +static void process_write_endio(struct dm_crypt_io *io, int async) +{ + struct bio *clone = io->ctx.bio_out; + unsigned remaining = io->remaining; + + /* Grab another reference to the io struct + * before we kick off the request */ + if (remaining) + atomic_inc(&io->pending); + + generic_make_request(clone); + + /* Do not reference clone after this - it + * may be gone already. */ + + if (likely(!remaining)) + return; + + /* out of memory -> run queues */ + congestion_wait(WRITE, HZ / 100); + + if (async) + crypt_write_loop(io); +} + +static void crypt_write_done(struct dm_crypt_io *io, int async) { + struct bio *clone = io->ctx.bio_out; struct crypt_config *cc = io->target->private; - struct bio *base_bio = io->base_bio; - struct bio *clone; - struct convert_context ctx; - unsigned remaining = base_bio->bi_size; - sector_t sector = base_bio->bi_sector - io->target->begin; - atomic_inc(&io->pending); + if (unlikely(io->ctx.err < 0)) { + crypt_free_buffer_pages(cc, clone, clone->bi_size); + bio_put(clone); + dec_pending(io, -EIO); + return; + } + + /* crypt_convert should have filled the clone bio */ + BUG_ON(io->ctx.idx_out < clone->bi_vcnt); + + clone->bi_sector = cc->start + io->sector; + io->remaining -= clone->bi_size; + io->sector += bio_sectors(clone); - crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1); + if (async) + kcryptd_queue_io(io); + else + process_write_endio(io, 0); +} + +static void crypt_write_loop(struct dm_crypt_io *io) +{ + struct crypt_config *cc = io->target->private; + 
struct bio *clone; /* * The allocated buffers can be smaller than the whole bio, * so repeat the whole process until all the data can be handled. + * We need to add another io reference because + * it can go away before we test the remaining part. */ - while (remaining) { - clone = crypt_alloc_buffer(io, remaining); + atomic_inc(&io->pending); + + while (io->remaining) { + clone = crypt_alloc_buffer(io, io->remaining); if (unlikely(!clone)) { dec_pending(io, -ENOMEM); - return; + break; } - ctx.bio_out = clone; - ctx.idx_out = 0; + io->ctx.bio_out = clone; + io->ctx.idx_out = 0; - if (unlikely(crypt_convert(cc, &ctx) < 0)) { - crypt_free_buffer_pages(cc, clone, clone->bi_size); - bio_put(clone); - dec_pending(io, -EIO); - return; - } + crypt_convert(cc, &io->ctx); + crypt_write_done(io, 0); + if (unlikely(io->ctx.err < 0)) + break; + }; - /* crypt_convert should have filled the clone bio */ - BUG_ON(ctx.idx_out < clone->bi_vcnt); + dec_pending(io, 0); +} - clone->bi_sector = cc->start + sector; - remaining -= clone->bi_size; - sector += bio_sectors(clone); - - /* Grab another reference to the io struct - * before we kick off the request */ - if (remaining) - atomic_inc(&io->pending); - - generic_make_request(clone); - - /* Do not reference clone after this - it - * may be gone already. 
*/ - - /* out of memory -> run queues */ - if (remaining) - congestion_wait(WRITE, HZ/100); - } +static void process_write(struct dm_crypt_io *io) +{ + struct crypt_config *cc = io->target->private; + struct bio *base_bio = io->base_bio; + + io->remaining = base_bio->bi_size; + io->sector = base_bio->bi_sector - io->target->begin; + + atomic_inc(&io->pending); + + crypt_convert_init(cc, &io->ctx, NULL, base_bio, io->sector, 1); + crypt_write_loop(io); } static void process_read_endio(struct dm_crypt_io *io) { struct crypt_config *cc = io->target->private; - struct convert_context ctx; - crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio, + crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, io->base_bio->bi_sector - io->target->begin, 0); - dec_pending(io, crypt_convert(cc, &ctx)); + crypt_convert(cc, &io->ctx); + crypt_read_done(io); } static void kcryptd_do_work(struct work_struct *work) @@ -680,6 +733,8 @@ static void kcryptd_do_work(struct work_ if (bio_data_dir(io->base_bio) == READ) process_read(io); + else + process_write_endio(io, 1); } static void kcryptd_do_crypt(struct work_struct *work)