Wait only if the bio split was caused by running out of memory.

There are two situations when an encrypted bio must be split:
 1) there are no free pages;
 2) the new bio layout would violate a restriction of the underlying
    device (e.g. max hw segments).

In case 2) we do not need to wait.

Add an output variable to crypt_alloc_buffer() to distinguish these
two cases.

Signed-off-by: Milan Broz <mbroz@redhat.com>
---
 drivers/md/dm-crypt.c |   24 ++++++++++++++++--------
 1 files changed, 16 insertions(+), 8 deletions(-)

Index: linux/drivers/md/dm-crypt.c
===================================================================
--- linux.orig/drivers/md/dm-crypt.c	2008-08-04 14:40:11.000000000 +0100
+++ linux/drivers/md/dm-crypt.c	2008-08-04 14:40:13.000000000 +0100
@@ -459,7 +459,8 @@ static void dm_crypt_bio_destructor(stru
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -473,11 +474,14 @@ static struct bio *crypt_alloc_buffer(st
 		return NULL;
 
 	clone_init(io, clone);
+	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}
 
 		/*
 		 * if additional pages cannot be allocated without waiting,
@@ -694,7 +698,7 @@ static void kcryptd_crypt_write_convert(
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
-	unsigned crypt_finished;
+	unsigned crypt_finished, out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
 	int r;
 
@@ -709,7 +713,7 @@ static void kcryptd_crypt_write_convert(
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
 			break;
@@ -737,11 +741,15 @@ static void kcryptd_crypt_write_convert(
 			break;
 		}
 
-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+		/*
+		 * Out of memory -> run queues
+		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
			congestion_wait(WRITE, HZ/100);
-		}
+
+		if (unlikely(remaining))
+			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
 	}
 
 	crypt_dec_pending(io);
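
(Reviewer's note, not part of the patch: the change to the waiting logic,
reduced to a skeleton with the before and after variants side by side.
This is an illustrative excerpt reconstructed from the hunks above; the
surrounding kernel context is elided.)

	/*
	 * Before this patch: every split waited for pending fragments
	 * and then polled the congestion queues, even when the split
	 * was only due to device limits (e.g. max hw segments).
	 */
	if (unlikely(remaining)) {
		wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
		congestion_wait(WRITE, HZ/100);
	}

	/*
	 * After this patch: congestion_wait() runs only when
	 * crypt_alloc_buffer() reported page exhaustion via
	 * *out_of_pages; waiting for pending fragments still
	 * applies to every split.
	 */
	if (unlikely(out_of_pages))
		congestion_wait(WRITE, HZ/100);

	if (unlikely(remaining))
		wait_event(cc->writeq, !atomic_read(&io->ctx.pending));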