From: Matt Helsley

This patch switches semundo from using the global task_watchers notifier
chain to a per-task notifier chain.  In the case where a task does not use
SysV semaphores this would save a call to exit_sem().

Based on Jes Sorensen's patch implementing this with task_notifiers.

Signed-off-by: Matt Helsley
Cc: Jes Sorensen
Signed-off-by: Andrew Morton
---

 ipc/sem.c |   23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff -puN ipc/sem.c~task-watchers-register-per-task-semundo-watcher ipc/sem.c
--- a/ipc/sem.c~task-watchers-register-per-task-semundo-watcher
+++ a/ipc/sem.c
@@ -142,10 +142,6 @@ static int sem_undo_task_exit(struct not
 	}
 }
 
-static struct notifier_block sem_watch_tasks_nb = {
-	.notifier_call = sem_undo_task_exit
-};
-
 static void __ipc_init __sem_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
 {
 	ns->ids[IPC_SEM_IDS] = ids;
@@ -196,7 +192,6 @@ void __init sem_init (void)
 	ipc_init_proc_interface("sysvipc/sem",
 				" key semid perms nsems uid gid cuid cgid otime ctime\n",
 				IPC_SEM_IDS, sysvipc_sem_proc_show);
-	register_task_watcher(&sem_watch_tasks_nb);
 }
 
 /*
@@ -1013,7 +1008,6 @@ static inline void unlock_semundo(void)
 	spin_unlock(&undo_list->lock);
 }
 
-
 /* If the task doesn't already have a undo_list, then allocate one
  * here.  We guarantee there is only one thread using this undo list,
  * and current is THE ONE
@@ -1029,13 +1023,24 @@ static inline int get_undo_list(struct s
 {
 	struct sem_undo_list *undo_list;
 	int size;
+	struct notifier_block *semun_nb;
+	int retval;
 
 	undo_list = current->sysvsem.undo_list;
 	if (!undo_list) {
+		semun_nb = NULL;
+		retval = -ENOMEM;
 		size = sizeof(struct sem_undo_list);
 		undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL);
 		if (undo_list == NULL)
-			return -ENOMEM;
+			goto err;
+		semun_nb = kzalloc(sizeof(*semun_nb), GFP_KERNEL);
+		if (semun_nb == NULL)
+			goto err;
+		semun_nb->notifier_call = sem_undo_task_exit;
+		retval = register_per_task_watcher(semun_nb);
+		if (retval)
+			goto err;
 		memset(undo_list, 0, size);
 		spin_lock_init(&undo_list->lock);
 		atomic_set(&undo_list->refcnt, 1);
@@ -1043,6 +1048,10 @@ static inline int get_undo_list(struct s
 	}
 	*undo_listp = undo_list;
 	return 0;
+err:
+	kfree(semun_nb);
+	kfree(undo_list);
+	return retval;
 }
 
 static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
_