From: Milan Broz

dm-crypt: Use crypto ablkcipher interface

Move encrypt/decrypt core to async crypto call.

[MB FIXME: Need following patch to work/compile]

Signed-off-by: Herbert Xu
Signed-off-by: Milan Broz
---
 drivers/md/dm-crypt.c |   96 ++++++++++++++++++++++++++------------------------
 1 files changed, 50 insertions(+), 46 deletions(-)

Index: linux-2.6.24-rc1/drivers/md/dm-crypt.c
===================================================================
--- linux-2.6.24-rc1.orig/drivers/md/dm-crypt.c	2007-11-07 14:08:49.000000000 +0000
+++ linux-2.6.24-rc1/drivers/md/dm-crypt.c	2007-11-07 14:09:54.000000000 +0000
@@ -342,38 +342,6 @@ static void crypt_async_done(struct cryp
 	crypt_write_io_done(io, error);
 }
 
-static int
-crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
-			  struct scatterlist *in, unsigned int length,
-			  int write, sector_t sector)
-{
-	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
-	struct blkcipher_desc desc = {
-		.tfm = cc->tfm,
-		.info = iv,
-		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
-	};
-	int r;
-
-	if (cc->iv_gen_ops) {
-		r = cc->iv_gen_ops->generator(cc, iv, sector);
-		if (r < 0)
-			return r;
-
-		if (write)
-			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
-		else
-			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
-	} else {
-		if (write)
-			r = crypto_blkcipher_encrypt(&desc, out, in, length);
-		else
-			r = crypto_blkcipher_decrypt(&desc, out, in, length);
-	}
-
-	return r;
-}
-
 static void crypt_convert_init(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct bio *bio_out, struct bio *bio_in,
@@ -391,15 +359,18 @@ static void crypt_convert_init(struct cr
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
-			       struct convert_context *ctx)
+			       struct convert_context *ctx,
+			       struct ablkcipher_request *req)
 {
 	int r;
 	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
 	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
 	struct dm_crypt_request *dmreq;
-	struct dm_crypt_request dmreq_tmp;
+	u8 *iv;
 
-	dmreq = &dmreq_tmp;
+	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
+			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
 
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -421,10 +392,20 @@ static int crypt_convert_block(struct cr
 		ctx->idx_out++;
 	}
 
-	r = crypt_convert_scatterlist(cc, &dmreq->sg_out, &dmreq->sg_in,
-				      dmreq->sg_in.length,
-				      bio_data_dir(ctx->bio_in) == WRITE,
-				      ctx->sector);
+	if (cc->iv_gen_ops) {
+		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector++);
+		if (r < 0)
+			return r;
+	}
+
+	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
+				     1 << SECTOR_SHIFT, iv);
+
+	if (bio_data_dir(ctx->bio_in) == WRITE)
+		r = crypto_ablkcipher_encrypt(req);
+	else
+		r = crypto_ablkcipher_decrypt(req);
+
 	return r;
 }
 
@@ -450,9 +431,25 @@ static int crypt_convert(struct crypt_co
 	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
 	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
 
-		r = crypt_convert_block(cc, ctx);
-		if (r < 0)
-			break;
+		crypt_alloc_req(cc, ctx);
+
+		r = crypt_convert_block(cc, ctx, cc->req);
+
+		switch (r) {
+		case -EBUSY:
+			wait_for_completion(&ctx->restart);
+			INIT_COMPLETION(ctx->restart);
+			/* fall through*/
+		case -EINPROGRESS:
+			atomic_inc(&ctx->pending);
+			cc->req = NULL;
+			r = 0;
+			/* fall through*/
+		case 0:
+			continue;
+		}
+
+		break;
 	}
 
 	/*
@@ -711,9 +708,11 @@ static void crypt_write_io_loop(struct d
 
 		r = crypt_convert(cc, &io->ctx);
 
-		crypt_write_io_done(io, r);
-		if (unlikely(r < 0))
-			return;
+		if (r != -EINPROGRESS) {
+			crypt_write_io_done(io, r);
+			if (unlikely(r < 0))
+				return;
+		}
 
 		/* out of memory -> run queues */
 		if (unlikely(remaining))
@@ -749,11 +748,16 @@ static void crypt_read_io_process(struct
 	struct crypt_config *cc = io->target->private;
 	int r = 0;
 
+	atomic_inc(&io->pending);
+
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
 			   io->sector);
 
 	r = crypt_convert(cc, &io->ctx);
 
-	crypt_read_io_done(io, r);
+	if (r != -EINPROGRESS)
+		crypt_read_io_done(io, r);
+
+	crypt_dec_pending(io);
 }
 
 static void kcryptd_do_io(struct work_struct *work)
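
For reference, the request-based calling convention the patch moves to can be boiled
down to the sketch below. It is illustrative only: the helper names
(async_cipher_one_block, one_block_done) are made up, the tfm is assumed to come from
crypto_alloc_ablkcipher() with a key already set, and it waits synchronously on a
completion, whereas dm-crypt itself does not block and instead finishes the bio from
crypt_async_done().

/*
 * Illustrative sketch only, not part of the patch: the bare ablkcipher
 * calling convention, with hypothetical names.  buf should be a
 * kmalloc'ed (DMA-able) buffer of at least 512 bytes.
 */
#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

struct one_block_ctx {
	struct completion done;
	int err;
};

/* Called from softirq context when the cipher finishes the request. */
static void one_block_done(struct crypto_async_request *async_req, int err)
{
	struct one_block_ctx *ctx = async_req->data;

	if (err == -EINPROGRESS)	/* backlogged request is now running */
		return;

	ctx->err = err;
	complete(&ctx->done);
}

/* Encrypt one 512-byte block in place and wait for the async result. */
static int async_cipher_one_block(struct crypto_ablkcipher *tfm,
				  u8 *buf, u8 *iv)
{
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct one_block_ctx ctx;
	int r;

	req = ablkcipher_request_alloc(tfm, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	init_completion(&ctx.done);
	ablkcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			one_block_done, &ctx);

	sg_init_one(&sg, buf, 512);
	ablkcipher_request_set_crypt(req, &sg, &sg, 512, iv);

	r = crypto_ablkcipher_encrypt(req);
	switch (r) {
	case 0:			/* completed synchronously */
		break;
	case -EINPROGRESS:	/* queued to the async engine */
	case -EBUSY:		/* accepted into the backlog (MAY_BACKLOG set) */
		wait_for_completion(&ctx.done);
		r = ctx.err;
		break;
	default:		/* immediate error */
		break;
	}

	ablkcipher_request_free(req);
	return r;
}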