From: Kiyoshi Ueda

This patch pulls the pg_init path activation code out of
process_queued_ios() into a new function, __pg_init_all_paths().

process_queued_ios() has been used for pg_init handling, which needs a
kthread context.  However, recent development introduced a dedicated
work item, activate_path(), for that purpose, so there is no longer any
need to use process_queued_ios() for it.

This patch is a preparation for the next patch, which fixes the issue
that an ioctl is not processed until some I/O is issued.  (It is also a
preparation for another patch set that removes the multipath internal
queue.)

No functional change.

Signed-off-by: Kiyoshi Ueda
Signed-off-by: Jun'ichi Nomura
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-mpath.c |   55 +++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 39 insertions(+), 16 deletions(-)

Index: linux-2.6.33-rc6/drivers/md/dm-mpath.c
===================================================================
--- linux-2.6.33-rc6.orig/drivers/md/dm-mpath.c
+++ linux-2.6.33-rc6/drivers/md/dm-mpath.c
@@ -235,6 +235,21 @@ static void free_multipath(struct multip
  * Path selection
  *-----------------------------------------------*/
 
+static void __pg_init_all_paths(struct multipath *m)
+{
+	struct pgpath *pgpath;
+
+	m->pg_init_count++;
+	m->pg_init_required = 0;
+	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
+		/* Skip failed paths */
+		if (!pgpath->is_active)
+			continue;
+		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+			m->pg_init_in_progress++;
+	}
+}
+
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 {
 	m->current_pg = pgpath->pg;
@@ -350,8 +365,9 @@ static int map_io(struct multipath *m, s
 		/* Queue for the daemon to resubmit */
 		list_add_tail(&clone->queuelist, &m->queued_ios);
 		m->queue_size++;
-		if ((m->pg_init_required && !m->pg_init_in_progress) ||
-		    !m->queue_io)
+		if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+			__pg_init_all_paths(m);
+		else if (!m->queue_io)
 			queue_work(kmultipathd, &m->process_queued_ios);
 		pgpath = NULL;
 		r = DM_MAPIO_SUBMITTED;
@@ -439,7 +455,7 @@ static void process_queued_ios(struct wo
 {
 	struct multipath *m =
 		container_of(work, struct multipath, process_queued_ios);
-	struct pgpath *pgpath = NULL, *tmp;
+	struct pgpath *pgpath = NULL;
 	unsigned must_queue = 1;
 	unsigned long flags;
 
@@ -457,17 +473,9 @@ static void process_queued_ios(struct wo
 	    (!pgpath && !m->queue_if_no_path))
 		must_queue = 0;
 
-	if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
-		m->pg_init_count++;
-		m->pg_init_required = 0;
-		list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
-			/* Skip failed paths */
-			if (!tmp->is_active)
-				continue;
-			if (queue_work(kmpath_handlerd, &tmp->activate_path))
-				m->pg_init_in_progress++;
-		}
-	}
+	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+		__pg_init_all_paths(m);
+
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
 	if (!must_queue)
@@ -1215,9 +1223,24 @@ static void pg_init_done(void *data, int
 		/* Activations of other paths are still on going */
 		goto out;
 
-	if (!m->pg_init_required)
-		m->queue_io = 0;
+	if (m->pg_init_required) {
+		/* Requested retry or a new pg_init */
+		if (likely(m->current_pgpath)) {
+			__pg_init_all_paths(m);
+			goto out;
+		}
+
+		/*
+		 * The condition requiring pg_init has been changed by someone
+		 * after the pg_init had been requested.
+		 * Cancel m->pg_init_required here explicitly, and start over
+		 * from path selection.
+		 */
+		m->pg_init_required = 0;
+		m->current_pg = NULL;
+	}
 
+	m->queue_io = 0;
 	queue_work(kmultipathd, &m->process_queued_ios);
 
 	/*