From: Oleg Nesterov

__exit_signal() is private to release_task() now. I think it is better to
make it static in kernel/exit.c and export flush_sigqueue() instead - that
function is much simpler and more straightforward.

Signed-off-by: Oleg Nesterov
Signed-off-by: Andrew Morton
---

 include/linux/sched.h  |    1 
 include/linux/signal.h |    2 +
 kernel/exit.c          |   63 +++++++++++++++++++++++++++++++++++++
 kernel/signal.c        |   65 ---------------------------------------
 4 files changed, 66 insertions(+), 65 deletions(-)

diff -puN include/linux/sched.h~move-__exit_signal-to-kernel-exitc include/linux/sched.h
--- devel/include/linux/sched.h~move-__exit_signal-to-kernel-exitc 2006-02-27 20:58:32.000000000 -0800
+++ devel-akpm/include/linux/sched.h 2006-02-27 20:58:32.000000000 -0800
@@ -1145,7 +1145,6 @@ extern void exit_thread(void);
 extern void exit_files(struct task_struct *);
 extern void __cleanup_signal(struct signal_struct *);
 extern void cleanup_sighand(struct task_struct *);
-extern void __exit_signal(struct task_struct *);
 extern void exit_itimers(struct signal_struct *);
 
 extern NORET_TYPE void do_group_exit(int);
diff -puN include/linux/signal.h~move-__exit_signal-to-kernel-exitc include/linux/signal.h
--- devel/include/linux/signal.h~move-__exit_signal-to-kernel-exitc 2006-02-27 20:58:32.000000000 -0800
+++ devel-akpm/include/linux/signal.h 2006-02-27 20:58:32.000000000 -0800
@@ -249,6 +249,8 @@ static inline void init_sigpending(struc
         INIT_LIST_HEAD(&sig->list);
 }
 
+extern void flush_sigqueue(struct sigpending *queue);
+
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
 {
diff -puN kernel/exit.c~move-__exit_signal-to-kernel-exitc kernel/exit.c
--- devel/kernel/exit.c~move-__exit_signal-to-kernel-exitc 2006-02-27 20:58:32.000000000 -0800
+++ devel-akpm/kernel/exit.c 2006-02-27 20:58:32.000000000 -0800
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -60,6 +61,68 @@ static void __unhash_process(struct task
         remove_parent(p);
 }
 
+/*
+ * This function expects the tasklist_lock write-locked.
+ */
+static void __exit_signal(struct task_struct *tsk)
+{
+        struct signal_struct *sig = tsk->signal;
+        struct sighand_struct *sighand;
+
+        BUG_ON(!sig);
+        BUG_ON(!atomic_read(&sig->count));
+
+        rcu_read_lock();
+        sighand = rcu_dereference(tsk->sighand);
+        spin_lock(&sighand->siglock);
+
+        posix_cpu_timers_exit(tsk);
+        if (atomic_dec_and_test(&sig->count))
+                posix_cpu_timers_exit_group(tsk);
+        else {
+                /*
+                 * If there is any task waiting for the group exit
+                 * then notify it:
+                 */
+                if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+                        wake_up_process(sig->group_exit_task);
+                        sig->group_exit_task = NULL;
+                }
+                if (tsk == sig->curr_target)
+                        sig->curr_target = next_thread(tsk);
+                /*
+                 * Accumulate here the counters for all threads but the
+                 * group leader as they die, so they can be added into
+                 * the process-wide totals when those are taken.
+                 * The group leader stays around as a zombie as long
+                 * as there are other threads. When it gets reaped,
+                 * the exit.c code will add its counts into these totals.
+                 * We won't ever get here for the group leader, since it
+                 * will have been the last reference on the signal_struct.
+                 */
+                sig->utime = cputime_add(sig->utime, tsk->utime);
+                sig->stime = cputime_add(sig->stime, tsk->stime);
+                sig->min_flt += tsk->min_flt;
+                sig->maj_flt += tsk->maj_flt;
+                sig->nvcsw += tsk->nvcsw;
+                sig->nivcsw += tsk->nivcsw;
+                sig->sched_time += tsk->sched_time;
+                sig = NULL; /* Marker for below. */
+        }
+
+        tsk->signal = NULL;
+        cleanup_sighand(tsk);
+        spin_unlock(&sighand->siglock);
+        rcu_read_unlock();
+
+        clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+        flush_sigqueue(&tsk->pending);
+        if (sig) {
+                flush_sigqueue(&sig->shared_pending);
+                __cleanup_signal(sig);
+        }
+}
+
 void release_task(struct task_struct * p)
 {
         int zap_leader;
diff -puN kernel/signal.c~move-__exit_signal-to-kernel-exitc kernel/signal.c
--- devel/kernel/signal.c~move-__exit_signal-to-kernel-exitc 2006-02-27 20:58:32.000000000 -0800
+++ devel-akpm/kernel/signal.c 2006-02-27 20:58:32.000000000 -0800
@@ -22,7 +22,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -295,7 +294,7 @@ static void __sigqueue_free(struct sigqu
         kmem_cache_free(sigqueue_cachep, q);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
         struct sigqueue *q;
 
@@ -322,68 +321,6 @@ void flush_signals(struct task_struct *t
 }
 
 /*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
-        struct signal_struct *sig = tsk->signal;
-        struct sighand_struct *sighand;
-
-        BUG_ON(!sig);
-        BUG_ON(!atomic_read(&sig->count));
-
-        rcu_read_lock();
-        sighand = rcu_dereference(tsk->sighand);
-        spin_lock(&sighand->siglock);
-
-        posix_cpu_timers_exit(tsk);
-        if (atomic_dec_and_test(&sig->count))
-                posix_cpu_timers_exit_group(tsk);
-        else {
-                /*
-                 * If there is any task waiting for the group exit
-                 * then notify it:
-                 */
-                if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-                        wake_up_process(sig->group_exit_task);
-                        sig->group_exit_task = NULL;
-                }
-                if (tsk == sig->curr_target)
-                        sig->curr_target = next_thread(tsk);
-                /*
-                 * Accumulate here the counters for all threads but the
-                 * group leader as they die, so they can be added into
-                 * the process-wide totals when those are taken.
-                 * The group leader stays around as a zombie as long
-                 * as there are other threads. When it gets reaped,
-                 * the exit.c code will add its counts into these totals.
-                 * We won't ever get here for the group leader, since it
-                 * will have been the last reference on the signal_struct.
-                 */
-                sig->utime = cputime_add(sig->utime, tsk->utime);
-                sig->stime = cputime_add(sig->stime, tsk->stime);
-                sig->min_flt += tsk->min_flt;
-                sig->maj_flt += tsk->maj_flt;
-                sig->nvcsw += tsk->nvcsw;
-                sig->nivcsw += tsk->nivcsw;
-                sig->sched_time += tsk->sched_time;
-                sig = NULL; /* Marker for below. */
-        }
-
-        tsk->signal = NULL;
-        cleanup_sighand(tsk);
-        spin_unlock(&sighand->siglock);
-        rcu_read_unlock();
-
-        clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-        flush_sigqueue(&tsk->pending);
-        if (sig) {
-                flush_sigqueue(&sig->shared_pending);
-                __cleanup_signal(sig);
-        }
-}
-
-/*
  * Flush all handlers for a task.
  */
_