From: Andi Kleen Subject: [PATCH 3/6] dm-crypt: Scale to multiple CPUs 1/4 - Switch to global workqueue Currently dm-crypt does all encryption work per dmcrypt mapping in a single workqueue. This does not scale well when multiple CPUs are submitting IO at a high rate. The single CPU running the single thread cannot keep up with the encryption and encrypted IO performance tanks. This patch changes the crypto workqueue to be per CPU. This means that as long as the IO submitter (or the interrupt target CPUs for reads) runs on different CPUs the encryption work will also be parallel. To avoid a bottleneck on the IO worker I also changed those to be per CPU threads. There is still some shared data, so I suspect some bouncing cache lines. But I haven't done a detailed study on that yet. All the threads are global, not per CPU. That is to avoid a thread explosion on systems with a large number of CPUs and a larger number of dm-crypt mappings. The code takes care to avoid problems with nested mappings. Part 1/4: - Switch to global thread. (for now still use singlethread - using one tfm is not safe, fixed by following patch.) - Use lazy queue allocation (first device constructor creates queues). - Because dm-ioctl can run in parallel, use mutex to make queue allocation safe. 
Signed-off-by: Andi Kleen Signed-off-by: Milan Broz --- drivers/md/dm-crypt.c | 58 +++++++++++++++++++++++++++++------------------- 1 files changed, 35 insertions(+), 23 deletions(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index b1d0683..a0b776a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -104,9 +104,6 @@ struct crypt_config { mempool_t *page_pool; struct bio_set *bs; - struct workqueue_struct *io_queue; - struct workqueue_struct *crypt_queue; - char *cipher; char *cipher_mode; @@ -144,6 +141,11 @@ struct crypt_config { #define MIN_POOL_PAGES 32 #define MIN_BIO_PAGES 8 +/* Protect creation of a new crypt queue */ +static DEFINE_MUTEX(queue_setup_lock); +static struct workqueue_struct *crypt_workqueue; +static struct workqueue_struct *io_workqueue; + static struct kmem_cache *_crypt_io_pool; static void clone_init(struct dm_crypt_io *, struct bio *); @@ -740,10 +742,8 @@ static void kcryptd_io(struct work_struct *work) static void kcryptd_queue_io(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; - INIT_WORK(&io->work, kcryptd_io); - queue_work(cc->io_queue, &io->work); + queue_work(io_workqueue, &io->work); } static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, @@ -924,10 +924,8 @@ static void kcryptd_crypt(struct work_struct *work) static void kcryptd_queue_crypt(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; - INIT_WORK(&io->work, kcryptd_crypt); - queue_work(cc->crypt_queue, &io->work); + queue_work(crypt_workqueue, &io->work); } /* @@ -1005,11 +1003,6 @@ static void crypt_dtr(struct dm_target *ti) if (!cc) return; - if (cc->io_queue) - destroy_workqueue(cc->io_queue); - if (cc->crypt_queue) - destroy_workqueue(cc->crypt_queue); - if (cc->bs) bioset_free(cc->bs); @@ -1164,6 +1157,27 @@ bad: return ret; } +/* Use a global encryption workqueue for all mounts */ +static int crypt_create_workqueues(void) +{ + int ret = 0; + + /* Module unload cleans up on error 
*/ + mutex_lock(&queue_setup_lock); + if (!crypt_workqueue) { + crypt_workqueue = create_singlethread_workqueue("dmcrypt"); + if (!crypt_workqueue) + ret = -ENOMEM; + } + if (!io_workqueue) { + io_workqueue = create_singlethread_workqueue("dmcrypt-io"); + if (!io_workqueue) + ret = -ENOMEM; + } + mutex_unlock(&queue_setup_lock); + return ret; +} + /* * Construct an encryption mapping: * @@ -1246,15 +1260,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } cc->start = tmpll; - cc->io_queue = create_singlethread_workqueue("kcryptd_io"); - if (!cc->io_queue) { - ti->error = "Couldn't create kcryptd io queue"; - goto bad; - } - - cc->crypt_queue = create_singlethread_workqueue("kcryptd"); - if (!cc->crypt_queue) { - ti->error = "Couldn't create kcryptd queue"; + ret = crypt_create_workqueues(); + if (ret < 0) { + ti->error = "Cannot create kcrypt work queues"; goto bad; } @@ -1450,6 +1458,10 @@ static void __exit dm_crypt_exit(void) { dm_unregister_target(&crypt_target); kmem_cache_destroy(_crypt_io_pool); + if (crypt_workqueue) + destroy_workqueue(crypt_workqueue); + if (io_workqueue) + destroy_workqueue(io_workqueue); } module_init(dm_crypt_init); -- 1.7.1