From: Ingo Molnar

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Ingo Molnar
Cc: Manfred Spraul
Signed-off-by: Andrew Morton
---

 ipc/compat.c |    2 +-
 ipc/mqueue.c |    4 +++-
 ipc/msg.c    |   18 ++++++++++--------
 ipc/sem.c    |   18 ++++++++++--------
 ipc/shm.c    |   27 ++++++++++++++-------------
 ipc/util.c   |   29 +++++++++++++++--------------
 ipc/util.h   |    4 ++--
 7 files changed, 55 insertions(+), 47 deletions(-)
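Both the data structure and every call site change in lockstep. A condensed
before/after sketch of the mapping, assembled from the hunks below rather
than additional patch content:

	#include <linux/mutex.h>

	struct ipc_ids {
		/* ... */
		struct mutex mutex;		/* was: struct semaphore sem; */
	};

	/* initialization, as in ipc_init_ids() */
	mutex_init(&ids->mutex);		/* was: sema_init(&ids->sem, 1); */

	/* acquire/release, as in sys_msgget() and friends */
	mutex_lock(&msg_ids.mutex);		/* was: down(&msg_ids.sem); */
	/* ... look up or create the object ... */
	mutex_unlock(&msg_ids.mutex);		/* was: up(&msg_ids.sem); */

The conversion is safe here because the old semaphore was initialized to 1
and only ever used as a binary lock. Note that only the ipc_ids locks are
converted: SysV semaphores themselves (struct sem, sem_base, semval) are
user-visible objects and keep their semaphore naming.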
diff -puN ipc/compat.c~sem2mutex-ipc-idsem ipc/compat.c
--- 25/ipc/compat.c~sem2mutex-ipc-idsem	Tue Jan 17 16:24:17 2006
+++ 25-akpm/ipc/compat.c	Tue Jan 17 16:24:17 2006
@@ -30,7 +30,7 @@
 #include
 #include
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 #include

 #include "util.h"
diff -puN ipc/mqueue.c~sem2mutex-ipc-idsem ipc/mqueue.c
--- 25/ipc/mqueue.c~sem2mutex-ipc-idsem	Tue Jan 17 16:24:17 2006
+++ 25-akpm/ipc/mqueue.c	Tue Jan 17 16:24:17 2006
@@ -25,6 +25,8 @@
 #include
 #include
 #include
+#include <linux/mutex.h>
+
 #include
 #include "util.h"
@@ -761,7 +763,7 @@ out_unlock:
  * The receiver accepts the message and returns without grabbing the queue
  * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
  * are necessary. The same algorithm is used for sysv semaphores, see
- * ipc/sem.c fore more details.
+ * ipc/sem.c for more details.
  *
  * The same algorithm is used for senders.
  */
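For reference, the two-stage wakeup that this comment describes looks
roughly like the following sketch. It follows the ipc/sem.c scheme of this
era (IN_WAKEUP marker plus smp_wmb()); treat it as an illustration, not
patch content:

	/* waker: runs under the object's spinlock */
	q->status = IN_WAKEUP;		/* transitional: result not valid yet */
	wake_up_process(q->sleeper);
	smp_wmb();			/* order marker store vs. result store */
	q->status = error;		/* final status, safe to read locklessly */

	/* sleeper: may complete without retaking the spinlock */
	while (q->status == IN_WAKEUP)
		cpu_relax();		/* spin out the transitional window */
	error = q->status;

The ipc_ids mutex plays no role in this fast path, which is why the
conversion does not affect it.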
diff -puN ipc/msg.c~sem2mutex-ipc-idsem ipc/msg.c
--- 25/ipc/msg.c~sem2mutex-ipc-idsem	Tue Jan 17 16:24:17 2006
+++ 25-akpm/ipc/msg.c	Tue Jan 17 16:24:17 2006
@@ -28,6 +28,8 @@
 #include
 #include
 #include
+#include <linux/mutex.h>
+
 #include
 #include
 #include "util.h"
@@ -179,8 +181,8 @@ static void expunge_all(struct msg_queue
  * removes the message queue from message queue ID
  * array, and cleans up all the messages associated with this queue.
  *
- * msg_ids.sem and the spinlock for this message queue is hold
- * before freeque() is called. msg_ids.sem remains locked on exit.
+ * msg_ids.mutex and the spinlock for this message queue are held
+ * before freeque() is called. msg_ids.mutex remains locked on exit.
  */
 static void freeque (struct msg_queue *msq, int id)
@@ -208,7 +210,7 @@ asmlinkage long sys_msgget (key_t key, i
 	int id, ret = -EPERM;
 	struct msg_queue *msq;

-	down(&msg_ids.sem);
+	mutex_lock(&msg_ids.mutex);
 	if (key == IPC_PRIVATE)
 		ret = newque(key, msgflg);
 	else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
@@ -232,7 +234,7 @@ asmlinkage long sys_msgget (key_t key, i
 		}
 		msg_unlock(msq);
 	}
-	up(&msg_ids.sem);
+	mutex_unlock(&msg_ids.mutex);
 	return ret;
 }
@@ -362,7 +364,7 @@ asmlinkage long sys_msgctl (int msqid, i
 		msginfo.msgmnb = msg_ctlmnb;
 		msginfo.msgssz = MSGSSZ;
 		msginfo.msgseg = MSGSEG;
-		down(&msg_ids.sem);
+		mutex_lock(&msg_ids.mutex);
 		if (cmd == MSG_INFO) {
 			msginfo.msgpool = msg_ids.in_use;
 			msginfo.msgmap = atomic_read(&msg_hdrs);
@@ -373,7 +375,7 @@ asmlinkage long sys_msgctl (int msqid, i
 			msginfo.msgtql = MSGTQL;
 		}
 		max_id = msg_ids.max_id;
-		up(&msg_ids.sem);
+		mutex_unlock(&msg_ids.mutex);
 		if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0: max_id;
@@ -436,7 +438,7 @@ asmlinkage long sys_msgctl (int msqid, i
 		return  -EINVAL;
 	}

-	down(&msg_ids.sem);
+	mutex_lock(&msg_ids.mutex);
 	msq = msg_lock(msqid);
 	err=-EINVAL;
 	if (msq == NULL)
@@ -490,7 +492,7 @@ asmlinkage long sys_msgctl (int msqid, i
 	}
 	err = 0;
 out_up:
-	up(&msg_ids.sem);
+	mutex_unlock(&msg_ids.mutex);
 	return err;
 out_unlock_up:
 	msg_unlock(msq);
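The locking rule in the freeque() comment is the one this patch preserves
everywhere: the ID mutex is taken first, the per-object spinlock second,
and teardown runs with both held. Schematically, for the IPC_RMID path
above (a sketch with the checks elided):

	mutex_lock(&msg_ids.mutex);	/* serializes against msgget()/msgctl() */
	msq = msg_lock(msqid);		/* takes the per-queue spinlock */
	if (msq == NULL)
		goto out_up;
	/* ... permission and sanity checks ... */
	freeque(msq, msqid);		/* drops the spinlock, frees the queue */
out_up:
	mutex_unlock(&msg_ids.mutex);	/* mutex stays held across freeque() */

freeary() in ipc/sem.c and shm_destroy() in ipc/shm.c follow the same
discipline.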
diff -puN ipc/sem.c~sem2mutex-ipc-idsem ipc/sem.c
--- 25/ipc/sem.c~sem2mutex-ipc-idsem	Tue Jan 17 16:24:17 2006
+++ 25-akpm/ipc/sem.c	Tue Jan 17 16:24:17 2006
@@ -75,6 +75,8 @@
 #include
 #include
 #include
+#include <linux/mutex.h>
+
 #include
 #include "util.h"
@@ -214,7 +216,7 @@ asmlinkage long sys_semget (key_t key, i
 	if (nsems < 0 || nsems > sc_semmsl)
 		return -EINVAL;

-	down(&sem_ids.sem);
+	mutex_lock(&sem_ids.mutex);

 	if (key == IPC_PRIVATE) {
 		err = newary(key, nsems, semflg);
@@ -242,7 +244,7 @@ asmlinkage long sys_semget (key_t key, i
 		sem_unlock(sma);
 	}

-	up(&sem_ids.sem);
+	mutex_unlock(&sem_ids.mutex);
 	return err;
 }
@@ -437,8 +439,8 @@ static int count_semzcnt (struct sem_arr
 	return semzcnt;
 }

-/* Free a semaphore set. freeary() is called with sem_ids.sem down and
- * the spinlock for this semaphore set hold. sem_ids.sem remains locked
+/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
+ * the spinlock for this semaphore set held. sem_ids.mutex remains locked
  * on exit.
  */
 static void freeary (struct sem_array *sma, int id)
@@ -525,7 +527,7 @@ static int semctl_nolock(int semid, int
 		seminfo.semmnu = SEMMNU;
 		seminfo.semmap = SEMMAP;
 		seminfo.semume = SEMUME;
-		down(&sem_ids.sem);
+		mutex_lock(&sem_ids.mutex);
 		if (cmd == SEM_INFO) {
 			seminfo.semusz = sem_ids.in_use;
 			seminfo.semaem = used_sems;
@@ -534,7 +536,7 @@ static int semctl_nolock(int semid, int
 			seminfo.semaem = SEMAEM;
 		}
 		max_id = sem_ids.max_id;
-		up(&sem_ids.sem);
+		mutex_unlock(&sem_ids.mutex);
 		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0: max_id;
@@ -885,9 +887,9 @@ asmlinkage long sys_semctl (int semid, i
 		return err;
 	case IPC_RMID:
 	case IPC_SET:
-		down(&sem_ids.sem);
+		mutex_lock(&sem_ids.mutex);
 		err = semctl_down(semid,semnum,cmd,version,arg);
-		up(&sem_ids.sem);
+		mutex_unlock(&sem_ids.mutex);
 		return err;
 	default:
 		return -EINVAL;
diff -puN ipc/shm.c~sem2mutex-ipc-idsem ipc/shm.c
--- 25/ipc/shm.c~sem2mutex-ipc-idsem	Tue Jan 17 16:24:17 2006
+++ 25-akpm/ipc/shm.c	Tue Jan 17 16:24:17 2006
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include <linux/mutex.h>

 #include
@@ -109,7 +110,7 @@ static void shm_open (struct vm_area_str
  *
  * @shp: struct to free
  *
- * It has to be called with shp and shm_ids.sem locked,
+ * It has to be called with shp and shm_ids.mutex locked,
  * but returns with shp unlocked and freed.
  */
 static void shm_destroy (struct shmid_kernel *shp)
@@ -139,7 +140,7 @@ static void shm_close (struct vm_area_st
 	int id = file->f_dentry->d_inode->i_ino;
 	struct shmid_kernel *shp;

-	down (&shm_ids.sem);
+	mutex_lock(&shm_ids.mutex);
 	/* remove from the list of attaches of the shm segment */
 	if(!(shp = shm_lock(id)))
 		BUG();
@@ -151,7 +152,7 @@ static void shm_close (struct vm_area_st
 		shm_destroy (shp);
 	else
 		shm_unlock(shp);
-	up (&shm_ids.sem);
+	mutex_unlock(&shm_ids.mutex);
 }

 static int shm_mmap(struct file * file, struct vm_area_struct * vma)
@@ -270,7 +271,7 @@ asmlinkage long sys_shmget (key_t key, s
 	struct shmid_kernel *shp;
 	int err, id = 0;

-	down(&shm_ids.sem);
+	mutex_lock(&shm_ids.mutex);
 	if (key == IPC_PRIVATE) {
 		err = newseg(key, shmflg, size);
 	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
@@ -296,7 +297,7 @@ asmlinkage long sys_shmget (key_t key, s
 		}
 		shm_unlock(shp);
 	}
-	up(&shm_ids.sem);
+	mutex_unlock(&shm_ids.mutex);
 	return err;
 }
@@ -467,14 +468,14 @@ asmlinkage long sys_shmctl (int shmid, i
 			return err;

 		memset(&shm_info,0,sizeof(shm_info));
-		down(&shm_ids.sem);
+		mutex_lock(&shm_ids.mutex);
 		shm_info.used_ids = shm_ids.in_use;
 		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
 		shm_info.shm_tot = shm_tot;
 		shm_info.swap_attempts = 0;
 		shm_info.swap_successes = 0;
 		err = shm_ids.max_id;
-		up(&shm_ids.sem);
+		mutex_unlock(&shm_ids.mutex);
 		if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
 			err = -EFAULT;
 			goto out;
@@ -583,7 +584,7 @@ asmlinkage long sys_shmctl (int shmid, i
 		 * Instead we set a destroyed flag, and then blow
 		 * the name away when the usage hits zero.
 		 */
-		down(&shm_ids.sem);
+		mutex_lock(&shm_ids.mutex);
 		shp = shm_lock(shmid);
 		err = -EINVAL;
 		if (shp == NULL)
@@ -610,7 +611,7 @@ asmlinkage long sys_shmctl (int shmid, i
 			shm_unlock(shp);
 		} else
 			shm_destroy (shp);
-		up(&shm_ids.sem);
+		mutex_unlock(&shm_ids.mutex);
 		goto out;
 	}
@@ -620,7 +621,7 @@ asmlinkage long sys_shmctl (int shmid, i
 			err = -EFAULT;
 			goto out;
 		}
-		down(&shm_ids.sem);
+		mutex_lock(&shm_ids.mutex);
 		shp = shm_lock(shmid);
 		err=-EINVAL;
 		if(shp==NULL)
@@ -658,7 +659,7 @@ asmlinkage long sys_shmctl (int shmid, i
 out_unlock_up:
 	shm_unlock(shp);
 out_up:
-	up(&shm_ids.sem);
+	mutex_unlock(&shm_ids.mutex);
 	goto out;
 out_unlock:
 	shm_unlock(shp);
@@ -771,7 +772,7 @@ long do_shmat(int shmid, char __user *sh
 invalid:
 	up_write(&current->mm->mmap_sem);

-	down (&shm_ids.sem);
+	mutex_lock(&shm_ids.mutex);
 	if(!(shp = shm_lock(shmid)))
 		BUG();
 	shp->shm_nattch--;
@@ -780,7 +781,7 @@ invalid:
 		shm_destroy (shp);
 	else
 		shm_unlock(shp);
-	up (&shm_ids.sem);
+	mutex_unlock(&shm_ids.mutex);

 	*raddr = (unsigned long) user_addr;
 	err = 0;
diff -puN ipc/util.c~sem2mutex-ipc-idsem ipc/util.c
--- 25/ipc/util.c~sem2mutex-ipc-idsem	Tue Jan 17 16:24:17 2006
+++ 25-akpm/ipc/util.c	Tue Jan 17 16:24:17 2006
@@ -69,7 +69,8 @@ __initcall(ipc_init);
 void __init ipc_init_ids(struct ipc_ids* ids, int size)
 {
 	int i;
-	sema_init(&ids->sem,1);
+
+	mutex_init(&ids->mutex);

 	if(size > IPCMNI)
 		size = IPCMNI;
@@ -139,7 +140,7 @@ void __init ipc_init_proc_interface(cons
  * @ids: Identifier set
  * @key: The key to find
  *
- * Requires ipc_ids.sem locked.
+ * Requires ipc_ids.mutex locked.
  * Returns the identifier if found or -1 if not.
  */
@@ -151,7 +152,7 @@ int ipc_findkey(struct ipc_ids* ids, key

 	/*
 	 * rcu_dereference() is not needed here
-	 * since ipc_ids.sem is held
+	 * since ipc_ids.mutex is held
 	 */
 	for (id = 0; id <= max_id; id++) {
 		p = ids->entries->p[id];
@@ -164,7 +165,7 @@ int ipc_findkey(struct ipc_ids* ids, key
 }

 /*
- * Requires ipc_ids.sem locked
+ * Requires ipc_ids.mutex locked
  */
 static int grow_ary(struct ipc_ids* ids, int newsize)
 {
@@ -211,7 +212,7 @@ static int grow_ary(struct ipc_ids* ids,
  * is returned. The list is returned in a locked state on success.
  * On failure the list is not locked and -1 is returned.
  *
- * Called with ipc_ids.sem held.
+ * Called with ipc_ids.mutex held.
  */
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
@@ -222,7 +223,7 @@ int ipc_addid(struct ipc_ids* ids, struc

 	/*
 	 * rcu_dereference()() is not needed here since
-	 * ipc_ids.sem is held
+	 * ipc_ids.mutex is held
 	 */
 	for (id = 0; id < size; id++) {
 		if(ids->entries->p[id] == NULL)
@@ -258,7 +259,7 @@ found:
 * fed an invalid identifier. The entry is removed and internal
 * variables recomputed. The object associated with the identifier
 * is returned.
- * ipc_ids.sem and the spinlock for this ID is hold before this function
+ * ipc_ids.mutex and the spinlock for this ID are held before this function
 * is called, and remain locked on the exit.
 */
@@ -271,7 +272,7 @@ struct kern_ipc_perm* ipc_rmid(struct ip

 	/*
 	 * do not need a rcu_dereference()() here to force ordering
-	 * on Alpha, since the ipc_ids.sem is held.
+	 * on Alpha, since the ipc_ids.mutex is held.
 	 */
 	p = ids->entries->p[lid];
 	ids->entries->p[lid] = NULL;
@@ -532,13 +533,13 @@ void ipc64_perm_to_ipc_perm (struct ipc6

 /*
 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
- * is called with shm_ids.sem locked. Since grow_ary() is also called with
- * shm_ids.sem down(for Shared Memory), there is no need to add read
+ * is called with shm_ids.mutex locked. Since grow_ary() is also called with
+ * shm_ids.mutex held (for Shared Memory), there is no need to add read
 * barriers here to gurantee the writes in grow_ary() are seen in order
 * here (for Alpha).
 *
- * However ipc_get() itself does not necessary require ipc_ids.sem down. So
- * if in the future ipc_get() is used by other places without ipc_ids.sem
+ * However ipc_get() itself does not necessarily require ipc_ids.mutex down. So
+ * if in the future ipc_get() is used by other places without ipc_ids.mutex
 * down, then ipc_get() needs read memery barriers as ipc_lock() does.
 */
 struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
@@ -669,7 +670,7 @@ static void *sysvipc_proc_start(struct s
 	 * Take the lock - this will be released by the corresponding
 	 * call to stop().
 	 */
-	down(&iface->ids->sem);
+	mutex_lock(&iface->ids->mutex);

 	/* pos < 0 is invalid */
 	if (*pos < 0)
@@ -699,7 +700,7 @@ static void sysvipc_proc_stop(struct seq
 		ipc_unlock(ipc);

 	/* Release the lock we took in start() */
-	up(&iface->ids->sem);
+	mutex_unlock(&iface->ids->mutex);
 }

 static int sysvipc_proc_show(struct seq_file *s, void *it)
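The rcu_dereference() remarks above encode one rule: ids->entries may be
swapped out by grow_ary(), and is published with RCU for lockless readers
such as ipc_lock(). Holders of ipc_ids.mutex exclude grow_ary() entirely
and can use plain loads. A schematic contrast (an illustration, not the
literal ipc_lock() body):

	/* lockless reader: entries may be replaced concurrently by grow_ary() */
	rcu_read_lock();
	entries = rcu_dereference(ids->entries);	/* read-side barrier (Alpha) */
	p = entries->p[lid];
	/* ... validate and take the per-object spinlock ... */
	rcu_read_unlock();

	/* with ipc_ids.mutex held: grow_ary() is excluded, a plain load is safe */
	p = ids->entries->p[lid];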
diff -puN ipc/util.h~sem2mutex-ipc-idsem ipc/util.h
--- 25/ipc/util.h~sem2mutex-ipc-idsem	Tue Jan 17 16:24:17 2006
+++ 25-akpm/ipc/util.h	Tue Jan 17 16:24:17 2006
@@ -25,7 +25,7 @@ struct ipc_ids {
 	int max_id;
 	unsigned short seq;
 	unsigned short seq_max;
-	struct semaphore sem;
+	struct mutex mutex;
 	struct ipc_id_ary nullentry;
 	struct ipc_id_ary* entries;
 };
@@ -40,7 +40,7 @@ void __init ipc_init_proc_interface(cons
 #define ipc_init_proc_interface(path, header, ids, show) do {} while (0)
 #endif

-/* must be called with ids->sem acquired.*/
+/* must be called with ids->mutex acquired.*/
 int ipc_findkey(struct ipc_ids* ids, key_t key);
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);
_