From: Milan Broz

Add the result of the operation (error code) into convert_context.
Now crypt_convert can be void.

[AGK FIXME: I'm not happy about this yet - is there a problem passing
the error code around normally when we reach the async patches?]

Introduce read/write functions called at the end of the I/O operation.

Create a new write endio function and move the related code from the
write loop.

Signed-off-by: Herbert Xu
Signed-off-by: Milan Broz
---
 drivers/md/dm-crypt.c |   73 +++++++++++++++++++++++++++++++++-----------------
 1 files changed, 49 insertions(+), 24 deletions(-)

Index: linux-2.6.23/drivers/md/dm-crypt.c
===================================================================
--- linux-2.6.23.orig/drivers/md/dm-crypt.c	2007-10-10 17:20:57.000000000 +0100
+++ linux-2.6.23/drivers/md/dm-crypt.c	2007-10-10 17:20:57.000000000 +0100
@@ -39,6 +39,7 @@ struct convert_context {
 	unsigned int idx_out;
 	sector_t sector;
 	int write;
+	int err;
 };
 
 /*
@@ -344,8 +345,8 @@ static void crypt_convert_init(struct cr
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
-static int crypt_convert(struct crypt_config *cc,
-                         struct convert_context *ctx)
+static void crypt_convert(struct crypt_config *cc,
+                          struct convert_context *ctx)
 {
 	int r = 0;
 
@@ -384,7 +385,7 @@ static int crypt_convert(struct crypt_co
 		ctx->sector++;
 	}
 
-	return r;
+	ctx->err = r;
 }
 
 static void dm_crypt_bio_destructor(struct bio *bio)
@@ -611,6 +612,40 @@ static void process_read(struct dm_crypt
 	generic_make_request(clone);
 }
 
+static void process_write_endio(struct dm_crypt_io *io)
+{
+	struct bio *clone = io->ctx.bio_out;
+	unsigned remaining = io->remaining;
+
+	/* Grab another reference to the io struct
+	 * before we kick off the request */
+	if (remaining)
+		atomic_inc(&io->pending);
+
+	generic_make_request(clone);
+
+	/* Do not reference clone after this - it
+	 * may be gone already. */
+
+	/* out of memory -> run queues */
+	if (remaining)
+		congestion_wait(WRITE, HZ / 100);
+}
+
+static void crypt_write_done(struct dm_crypt_io *io)
+{
+	struct bio *clone = io->ctx.bio_out;
+	struct crypt_config *cc = io->target->private;
+
+	/* crypt_convert should have filled the clone bio */
+
+	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+
+	clone->bi_sector = cc->start + io->sector;
+	io->remaining -= clone->bi_size;
+	io->sector += bio_sectors(clone);
+}
+
 static void crypt_write_loop(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
@@ -632,33 +667,17 @@ static void crypt_write_loop(struct dm_c
 		io->ctx.bio_out = clone;
 		io->ctx.idx_out = 0;
 
-		if (unlikely(crypt_convert(cc, &io->ctx) < 0)) {
+		crypt_convert(cc, &io->ctx);
+		if (unlikely(io->ctx.err < 0)) {
 			crypt_free_buffer_pages(cc, clone, clone->bi_size);
 			bio_put(clone);
 			crypt_dec_pending(io, -EIO);
 			break;
 		}
 
-		/* crypt_convert should have filled the clone bio */
-		BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+		crypt_write_done(io);
 
-		clone->bi_sector = cc->start + io->sector;
-		io->remaining -= clone->bi_size;
-		io->sector += bio_sectors(clone);
-
-		/* Grab another reference to the io struct
-		 * before we kick off the request */
-		if (io->remaining)
-			atomic_inc(&io->pending);
-
-		generic_make_request(clone);
-
-		/* Do not reference clone after this - it
-		 * may be gone already. */
-
-		/* out of memory -> run queues */
-		if (io->remaining)
-			congestion_wait(WRITE, HZ/100);
+		process_write_endio(io);
 	}
 
 	crypt_dec_pending(io, 0);
@@ -678,6 +697,11 @@ static void process_write(struct dm_cryp
 	crypt_write_loop(io);
 }
 
+static void crypt_read_done(struct dm_crypt_io *io)
+{
+	crypt_dec_pending(io, io->ctx.err);
+}
+
 static void process_read_endio(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
@@ -685,7 +709,8 @@ static void process_read_endio(struct dm
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
 			   io->base_bio->bi_sector - io->target->begin, 0);
 
-	crypt_dec_pending(io, crypt_convert(cc, &io->ctx));
+	crypt_convert(cc, &io->ctx);
+	crypt_read_done(io);
 }
 
 static void kcryptd_do_work(struct work_struct *work)
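For readers following the control-flow change, below is a minimal user-space
sketch (not part of the patch) of the pattern being introduced: the conversion
step records its result in the context instead of returning it, and a separate
completion helper consumes it afterwards. The names demo_context, demo_convert
and demo_read_done are hypothetical and only mirror the idea of convert_context.err,
crypt_convert and crypt_read_done.

/*
 * Sketch of passing the conversion result through the context rather
 * than through a return value.  Plain C, no kernel dependencies.
 */
#include <stdio.h>

struct demo_context {
	int sector;
	int err;		/* result of the conversion, like convert_context.err */
};

/* The conversion no longer returns an error; it records it in the context. */
static void demo_convert(struct demo_context *ctx, int simulate_failure)
{
	int r = 0;

	ctx->sector++;
	if (simulate_failure)
		r = -5;		/* stand-in for -EIO */

	ctx->err = r;
}

/* Completion helper reads the result back out of the context. */
static void demo_read_done(struct demo_context *ctx)
{
	if (ctx->err < 0)
		printf("conversion failed at sector %d (err %d)\n",
		       ctx->sector, ctx->err);
	else
		printf("conversion finished at sector %d\n", ctx->sector);
}

int main(void)
{
	struct demo_context ctx = { 0, 0 };

	demo_convert(&ctx, 0);	/* success path */
	demo_read_done(&ctx);

	demo_convert(&ctx, 1);	/* failure path */
	demo_read_done(&ctx);

	return 0;
}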