From: Ingo Molnar

clean up the code as per Andrew's suggestions:

 - '# ifdef' => '#ifdef'
 - fastcall removal
 - lots of macro -> C function conversions
 - move rtmutex_internals.h to kernel/rtmutex_common.h
 - uninline two larger functions
 - remove noinline
 - explain locking better
 - set_task_state(current, state) => set_current_state(state)
 - fix the PI code (Esben Nielsen)

Signed-off-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 include/linux/rtmutex.h          |   29 +---
 include/linux/rtmutex_internal.h |  187 -----------------------------
 kernel/fork.c                    |   11 +
 kernel/futex.c                   |    3
 kernel/rtmutex-debug.c           |    2
 kernel/rtmutex-debug.h           |    2
 kernel/rtmutex.c                 |  116 +++++++++++++++--
 kernel/rtmutex_common.h          |  123 +++++++++++++++++++
 kernel/sched.c                   |    2
 9 files changed, 247 insertions(+), 228 deletions(-)

diff -puN include/linux/rtmutex.h~pi-futex-v2 include/linux/rtmutex.h
--- devel/include/linux/rtmutex.h~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/include/linux/rtmutex.h	2006-05-19 16:01:33.000000000 -0700
@@ -27,14 +27,14 @@ struct rt_mutex {
 	spinlock_t		wait_lock;
 	struct plist_head	wait_list;
 	struct task_struct	*owner;
-# ifdef CONFIG_DEBUG_RT_MUTEXES
+#ifdef CONFIG_DEBUG_RT_MUTEXES
 	int			save_state;
 	struct list_head	held_list;
 	unsigned long		acquire_ip;
 	const char		*name, *file;
 	int			line;
 	void			*magic;
-# endif
+#endif
 };

 struct rt_mutex_waiter;
@@ -79,40 +79,31 @@ struct hrtimer_sleeper;
  *
  * Returns 1 if the mutex is locked, 0 if unlocked.
  */
-static inline int fastcall rt_mutex_is_locked(struct rt_mutex *lock)
+static inline int rt_mutex_is_locked(struct rt_mutex *lock)
 {
 	return lock->owner != NULL;
 }

-extern void fastcall __rt_mutex_init(struct rt_mutex *lock, const char *name);
-extern void fastcall rt_mutex_destroy(struct rt_mutex *lock);
+extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void rt_mutex_destroy(struct rt_mutex *lock);

-extern void fastcall rt_mutex_lock(struct rt_mutex *lock);
-extern int fastcall rt_mutex_lock_interruptible(struct rt_mutex *lock,
+extern void rt_mutex_lock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
 						int detect_deadlock);
-extern int fastcall rt_mutex_timed_lock(struct rt_mutex *lock,
+extern int rt_mutex_timed_lock(struct rt_mutex *lock,
 					struct hrtimer_sleeper *timeout,
 					int detect_deadlock);
-extern int fastcall rt_mutex_trylock(struct rt_mutex *lock);
+extern int rt_mutex_trylock(struct rt_mutex *lock);

-extern void fastcall rt_mutex_unlock(struct rt_mutex *lock);
+extern void rt_mutex_unlock(struct rt_mutex *lock);

 #ifdef CONFIG_RT_MUTEXES
-# define rt_mutex_init_task(p)					\
- do {								\
-	spin_lock_init(&p->pi_lock);				\
-	plist_head_init(&p->pi_waiters);			\
-	p->pi_blocked_on = NULL;				\
-	p->pi_locked_by = NULL;					\
-	INIT_LIST_HEAD(&p->pi_lock_chain);			\
- } while (0)
 # define INIT_RT_MUTEXES(tsk)					\
 	.pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters),		\
 	.pi_lock = SPIN_LOCK_UNLOCKED,				\
 	.pi_lock_chain = LIST_HEAD_INIT(tsk.pi_lock_chain),
 #else
-# define rt_mutex_init_task(p) do { } while (0)
 # define INIT_RT_MUTEXES(tsk)
 #endif

diff -L include/linux/rtmutex_internal.h -puN include/linux/rtmutex_internal.h~pi-futex-v2 /dev/null
--- devel/include/linux/rtmutex_internal.h
+++ /dev/null	2006-05-19 15:26:20.261540500 -0700
@@ -1,187 +0,0 @@
-/*
- * RT Mutexes: blocking mutual exclusion locks with PI support
- *
- * started by Ingo Molnar and Thomas Gleixner:
- *
- * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2006, Timesys Corp., Thomas Gleixner
- *
- * This file contains the private data structure and API definitions.
- */
-
-#ifndef __LINUX_RT_MUTEX_INTERNAL_H
-#define __LINUX_RT_MUTEX_INTERNAL_H
-
-#include
-
-/*
- * The rtmutex in kernel tester is independent of rtmutex debugging. We
- * call schedule_rt_mutex_test() instead of schedule() for the tasks which
- * belong to the tester. That way we can delay the wakeup path of those
- * threads to provoke lock stealing and testing of complex boosting scenarios.
- */
-#ifdef CONFIG_RT_MUTEX_TESTER
-
-extern void schedule_rt_mutex_test(struct rt_mutex *lock);
-
-#define schedule_rt_mutex(_lock)				\
-  do {								\
-	if (!(current->flags & PF_MUTEX_TESTER))		\
-		schedule();					\
-	else							\
-		schedule_rt_mutex_test(_lock);			\
-  } while (0)
-
-#else
-# define schedule_rt_mutex(_lock)			schedule()
-#endif
-
-/*
- * This is the control structure for tasks blocked on a rt_mutex,
- * which is allocated on the kernel stack on of the blocked task.
- *
- * @list_entry:		pi node to enqueue into the mutex waiters list
- * @pi_list_entry:	pi node to enqueue into the mutex owner waiters list
- * @task:		task reference to the blocked task
- */
-struct rt_mutex_waiter {
-	struct plist_node	list_entry;
-	struct plist_node	pi_list_entry;
-	struct task_struct	*task;
-	struct rt_mutex		*lock;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-	unsigned long		ip;
-	pid_t			deadlock_task_pid;
-	struct rt_mutex		*deadlock_lock;
-#endif
-};
-
-/*
- * Plist wrapper macros
- */
-#define rt_mutex_has_waiters(lock) (!plist_head_empty(&lock->wait_list))
-
-#define rt_mutex_top_waiter(lock)					\
-({ struct rt_mutex_waiter *__w = plist_first_entry(&lock->wait_list,	\
-				  struct rt_mutex_waiter, list_entry);	\
-	BUG_ON(__w->lock != lock);					\
-	__w;								\
-})
-
-#define task_has_pi_waiters(task) (!plist_head_empty(&task->pi_waiters))
-
-#define task_top_pi_waiter(task)					\
-	plist_first_entry(&task->pi_waiters, struct rt_mutex_waiter, pi_list_entry)
-
-/*
- * lock->owner state tracking:
- *
- * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
- * are used to keep track of the "owner is pending" and "lock has
- * waiters" state.
- *
- * owner	bit1	bit0
- * NULL		0	0	lock is free (fast acquire possible)
- * NULL		0	1	invalid state
- * NULL		1	0	invalid state
- * NULL		1	1	invalid state
- * taskpointer	0	0	lock is held (fast release possible)
- * taskpointer	0	1	task is pending owner
- * taskpointer	1	0	lock is held and has waiters
- * taskpointer	1	1	task is pending owner and lock has more waiters
- *
- * Pending ownership is assigned to the top (highest priority)
- * waiter of the lock, when the lock is released. The thread is woken
- * up and can now take the lock. Until the lock is taken (bit 0
- * cleared) a competing higher priority thread can steal the lock
- * which puts the woken up thread back on the waiters list.
- *
- * The fast atomic compare exchange based acquire and release is only
- * possible when bit 0 and 1 of lock->owner are 0.
- */
-#define RT_MUTEX_OWNER_PENDING	1UL
-#define RT_MUTEX_HAS_WAITERS	2UL
-#define RT_MUTEX_OWNER_MASKALL	3UL
-
-#define rt_mutex_owner(lock)						\
-({									\
-	typecheck(struct rt_mutex *,(lock));				\
-	((struct task_struct *)((unsigned long)((lock)->owner) & ~RT_MUTEX_OWNER_MASKALL)); \
-})
-
-#define rt_mutex_real_owner(lock)					\
-({									\
-	typecheck(struct rt_mutex *,(lock));				\
-	((struct task_struct *)((unsigned long)((lock)->owner) & ~RT_MUTEX_HAS_WAITERS)); \
-})
-
-#define rt_mutex_owner_pending(lock)					\
-({									\
-	typecheck(struct rt_mutex *,(lock));				\
-	((unsigned long)((lock)->owner) & RT_MUTEX_OWNER_PENDING);	\
-})
-
-static inline void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
-				      unsigned long msk)
-{
-	unsigned long val = ((unsigned long) owner) | msk;
-
-	if (rt_mutex_has_waiters(lock))
-		val |= RT_MUTEX_HAS_WAITERS;
-
-	lock->owner = (struct task_struct *)(val);
-}
-
-static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	unsigned long owner;
-
-	owner = ((unsigned long) lock->owner) & ~RT_MUTEX_HAS_WAITERS;
-	lock->owner = (struct task_struct *)(owner);
-}
-
-static inline void fixup_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	if (!rt_mutex_has_waiters(lock))
-		clear_rt_mutex_waiters(lock);
-}
-
-/*
- * We can speed up the acquire/release, if the architecture
- * supports cmpxchg and if there's no debugging state to be set up
- */
-#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
-
-# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
-
-static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	unsigned long owner, *p = (unsigned long *) &lock->owner;
-
-	do {
-		owner = *p;
-	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
-}
-
-#else
-
-# define rt_mutex_cmpxchg(l,c,n)	(0)
-
-static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	unsigned long owner = ((unsigned long) lock->owner)| RT_MUTEX_HAS_WAITERS;
-
-	lock->owner = (struct task_struct *) owner;
-}
-
-#endif
-
-/*
- * PI-futex support (proxy locking functions, etc.):
- */
-extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
-extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
-				       struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
-				  struct task_struct *proxy_owner);
-#endif
diff -puN kernel/fork.c~pi-futex-v2 kernel/fork.c
--- devel/kernel/fork.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/fork.c	2006-05-19 16:01:33.000000000 -0700
@@ -914,6 +914,17 @@ asmlinkage long sys_set_tid_address(int
 	return current->pid;
 }

+static inline void rt_mutex_init_task(struct task_struct *p)
+{
+#ifdef CONFIG_RT_MUTEXES
+	spin_lock_init(&p->pi_lock);
+	plist_head_init(&p->pi_waiters);
+	p->pi_blocked_on = NULL;
+	p->pi_locked_by = NULL;
+	INIT_LIST_HEAD(&p->pi_lock_chain);
+#endif
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
diff -puN kernel/futex.c~pi-futex-v2 kernel/futex.c
--- devel/kernel/futex.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/futex.c	2006-05-19 16:01:33.000000000 -0700
@@ -48,9 +48,10 @@
 #include
 #include
 #include
-#include
 #include

+#include "rtmutex_common.h"
+
 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

 /*
diff -puN kernel/rtmutex.c~pi-futex-v2 kernel/rtmutex.c
--- devel/kernel/rtmutex.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/rtmutex.c	2006-05-19 16:01:33.000000000 -0700
@@ -12,7 +12,7 @@
 #include
 #include

-#include
+#include "rtmutex_common.h"

 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
@@ -21,6 +21,80 @@
 #endif

 /*
+ * lock->owner state tracking:
+ *
+ * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
+ * are used to keep track of the "owner is pending" and "lock has
+ * waiters" state.
+ *
+ * owner	bit1	bit0
+ * NULL		0	0	lock is free (fast acquire possible)
+ * NULL		0	1	invalid state
+ * NULL		1	0	invalid state
+ * NULL		1	1	invalid state
+ * taskpointer	0	0	lock is held (fast release possible)
+ * taskpointer	0	1	task is pending owner
+ * taskpointer	1	0	lock is held and has waiters
+ * taskpointer	1	1	task is pending owner and lock has more waiters
+ *
+ * Pending ownership is assigned to the top (highest priority)
+ * waiter of the lock, when the lock is released. The thread is woken
+ * up and can now take the lock. Until the lock is taken (bit 0
+ * cleared) a competing higher priority thread can steal the lock
+ * which puts the woken up thread back on the waiters list.
+ *
+ * The fast atomic compare exchange based acquire and release is only
+ * possible when bit 0 and 1 of lock->owner are 0.
+ */
+
+static void
+rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
+		   unsigned long mask)
+{
+	unsigned long val = (unsigned long)owner | mask;
+
+	if (rt_mutex_has_waiters(lock))
+		val |= RT_MUTEX_HAS_WAITERS;
+
+	lock->owner = (struct task_struct *)val;
+}
+
+static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	lock->owner = (struct task_struct *)
+			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+}
+
+static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	if (!rt_mutex_has_waiters(lock))
+		clear_rt_mutex_waiters(lock);
+}
+
+/*
+ * We can speed up the acquire/release, if the architecture
+ * supports cmpxchg and if there's no debugging state to be set up
+ */
+#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
+# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	do {
+		owner = *p;
+	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+}
+#else
+# define rt_mutex_cmpxchg(l,c,n)	(0)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	lock->owner = (struct task_struct *)
+			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+}
+#endif
+
+/*
  * Calculate task priority from the waiter list priority
  *
  * Return task->normal_prio when the waiter list is empty or when
@@ -87,6 +161,9 @@ static DEFINE_SPINLOCK(pi_conflicts_lock
  * If 'try' is set, we have to backout if we hit a owner who is
  * running its own pi chain operation. We go back and take the slow
  * path via the pi_conflicts_lock.
+ *
+ * We put all held locks into a list, via ->pi_lock_chain, and walk
+ * this list at unlock_pi_chain() time.
  */
 static int lock_pi_chain(struct rt_mutex *act_lock,
 			 struct rt_mutex_waiter *waiter,
@@ -222,10 +299,15 @@ static void adjust_pi_chain(struct rt_mu
 		plist_del(&top_waiter->pi_list_entry,
 			  &owner->pi_waiters);

-	if (waiter && waiter == rt_mutex_top_waiter(lock)) {
+	if (waiter)
 		waiter->pi_list_entry.prio = waiter->task->prio;
-		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+
+	if (rt_mutex_has_waiters(lock)) {
+		top_waiter = rt_mutex_top_waiter(lock);
+		plist_add(&top_waiter->pi_list_entry,
+			  &owner->pi_waiters);
 	}
+
 	__rt_mutex_adjust_prio(owner);

 	waiter = owner->pi_blocked_on;
@@ -605,7 +687,7 @@ static int remove_waiter(struct rt_mutex
 /*
  * Slow path lock function:
  */
-static int fastcall noinline __sched
+static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
 		  int detect_deadlock __IP_DECL__)
@@ -711,7 +793,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
 /*
  * Slow path try-lock function:
  */
-static inline int fastcall
+static inline int
 rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
 {
 	unsigned long flags;
@@ -739,7 +821,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
 /*
  * Slow path to release a rt-mutex:
  */
-static void fastcall noinline __sched
+static void __sched
 rt_mutex_slowunlock(struct rt_mutex *lock)
 {
 	unsigned long flags;
@@ -773,7 +855,7 @@ rt_mutex_slowunlock(struct rt_mutex *loc
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
 		  int detect_deadlock,
-		  int fastcall (*slowfn)(struct rt_mutex *lock, int state,
+		  int (*slowfn)(struct rt_mutex *lock, int state,
 					 struct hrtimer_sleeper *timeout,
 					 int detect_deadlock __IP_DECL__))
 {
@@ -787,7 +869,7 @@ rt_mutex_fastlock(struct rt_mutex *lock,
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 			struct hrtimer_sleeper *timeout, int detect_deadlock,
-			int fastcall (*slowfn)(struct rt_mutex *lock, int state,
+			int (*slowfn)(struct rt_mutex *lock, int state,
 					       struct hrtimer_sleeper *timeout,
 					       int detect_deadlock __IP_DECL__))
 {
@@ -800,7 +882,7 @@ rt_mutex_timed_fastlock(struct rt_mutex
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int fastcall (*slowfn)(struct rt_mutex *lock __IP_DECL__))
+		     int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
@@ -811,7 +893,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
-		    void fastcall (*slowfn)(struct rt_mutex *lock))
+		    void (*slowfn)(struct rt_mutex *lock))
 {
 	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
 		rt_mutex_deadlock_account_unlock(current);
@@ -824,7 +906,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
  *
  * @lock: the rt_mutex to be locked
  */
-void fastcall __sched rt_mutex_lock(struct rt_mutex *lock)
+void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
 	might_sleep();
@@ -843,7 +925,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
 * -EINTR when interrupted by a signal
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
-int fastcall __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
						 int detect_deadlock)
 {
 	might_sleep();
@@ -868,7 +950,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interrup
 * -ETIMEOUT when the timeout expired
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
-int fastcall
+int
 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
 {
@@ -887,7 +969,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 *
 * Returns 1 on success and 0 on contention
 */
-int fastcall __sched rt_mutex_trylock(struct rt_mutex *lock)
+int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 }
@@ -898,7 +980,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 *
 * @lock: the rt_mutex to be unlocked
 */
-void fastcall __sched rt_mutex_unlock(struct rt_mutex *lock)
+void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
 }
@@ -912,7 +994,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
-void fastcall rt_mutex_destroy(struct rt_mutex *lock)
+void rt_mutex_destroy(struct rt_mutex *lock)
 {
	WARN_ON(rt_mutex_is_locked(lock));
 #ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -931,7 +1013,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 *
 * Initializing of a locked rt lock is not allowed
 */
-void fastcall __rt_mutex_init(struct rt_mutex *lock, const char *name)
+void __rt_mutex_init(struct rt_mutex *lock, const char *name)
 {
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
diff -puN /dev/null kernel/rtmutex_common.h
--- /dev/null	2006-05-19 15:26:20.261540500 -0700
+++ devel-akpm/kernel/rtmutex_common.h	2006-05-19 16:01:33.000000000 -0700
@@ -0,0 +1,123 @@
+/*
+ * RT Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner
+ *
+ * This file contains the private data structure and API definitions.
+ */
+
+#ifndef __KERNEL_RTMUTEX_COMMON_H
+#define __KERNEL_RTMUTEX_COMMON_H
+
+#include
+
+/*
+ * The rtmutex in kernel tester is independent of rtmutex debugging. We
+ * call schedule_rt_mutex_test() instead of schedule() for the tasks which
+ * belong to the tester. That way we can delay the wakeup path of those
+ * threads to provoke lock stealing and testing of complex boosting scenarios.
+ */
+#ifdef CONFIG_RT_MUTEX_TESTER
+
+extern void schedule_rt_mutex_test(struct rt_mutex *lock);
+
+#define schedule_rt_mutex(_lock)				\
+  do {								\
+	if (!(current->flags & PF_MUTEX_TESTER))		\
+		schedule();					\
+	else							\
+		schedule_rt_mutex_test(_lock);			\
+  } while (0)
+
+#else
+# define schedule_rt_mutex(_lock)			schedule()
+#endif
+
+/*
+ * This is the control structure for tasks blocked on a rt_mutex,
+ * which is allocated on the kernel stack on of the blocked task.
+ *
+ * @list_entry:		pi node to enqueue into the mutex waiters list
+ * @pi_list_entry:	pi node to enqueue into the mutex owner waiters list
+ * @task:		task reference to the blocked task
+ */
+struct rt_mutex_waiter {
+	struct plist_node	list_entry;
+	struct plist_node	pi_list_entry;
+	struct task_struct	*task;
+	struct rt_mutex		*lock;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+	unsigned long		ip;
+	pid_t			deadlock_task_pid;
+	struct rt_mutex		*deadlock_lock;
+#endif
+};
+
+/*
+ * Various helpers to access the waiters-plist:
+ */
+static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
+{
+	return !plist_head_empty(&lock->wait_list);
+}
+
+static inline struct rt_mutex_waiter *
+rt_mutex_top_waiter(struct rt_mutex *lock)
+{
+	struct rt_mutex_waiter *w;
+
+	w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
+			      list_entry);
+	BUG_ON(w->lock != lock);
+
+	return w;
+}
+
+static inline int task_has_pi_waiters(struct task_struct *p)
+{
+	return !plist_head_empty(&p->pi_waiters);
+}
+
+static inline struct rt_mutex_waiter *
+task_top_pi_waiter(struct task_struct *p)
+{
+	return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
+				 pi_list_entry);
+}
+
+/*
+ * lock->owner state tracking:
+ */
+#define RT_MUTEX_OWNER_PENDING	1UL
+#define RT_MUTEX_HAS_WAITERS	2UL
+#define RT_MUTEX_OWNER_MASKALL	3UL
+
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+{
+	return (struct task_struct *)
+		((unsigned long)((lock)->owner) & ~RT_MUTEX_OWNER_MASKALL);
+}
+
+static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
+{
+	return (struct task_struct *)
+		((unsigned long)((lock)->owner) & ~RT_MUTEX_HAS_WAITERS);
+}
+
+static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
+{
+	return ((unsigned long)((lock)->owner) & RT_MUTEX_OWNER_PENDING);
+}
+
+/*
+ * PI-futex support (proxy locking functions, etc.):
+ */
+extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+				       struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+				  struct task_struct *proxy_owner);
+#endif
diff -puN kernel/rtmutex-debug.c~pi-futex-v2 kernel/rtmutex-debug.c
--- devel/kernel/rtmutex-debug.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/rtmutex-debug.c	2006-05-19 16:01:33.000000000 -0700
@@ -27,7 +27,7 @@
 #include
 #include

-#include
+#include "rtmutex_common.h"

 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
diff -puN kernel/rtmutex-debug.h~pi-futex-v2 kernel/rtmutex-debug.h
--- devel/kernel/rtmutex-debug.h~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/rtmutex-debug.h	2006-05-19 16:01:33.000000000 -0700
@@ -9,8 +9,6 @@
 * This file contains macros used solely by rtmutex.c. Debug version.
 */

-#include
-
 #define __IP_DECL__		, unsigned long ip
 #define __IP__			, ip
 #define __RET_IP__		, (unsigned long)__builtin_return_address(0)
diff -puN kernel/sched.c~pi-futex-v2 kernel/sched.c
--- devel/kernel/sched.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/sched.c	2006-05-19 16:01:33.000000000 -0700
@@ -3961,7 +3961,7 @@ static void __setscheduler(struct task_s
	p->rt_priority = prio;
	p->normal_prio = normal_prio(p);
-	/* we are holding p->pi_list already */
+	/* we are holding p->pi_lock already */
	p->prio = rt_mutex_getprio(p);
	/*
	 * SCHED_BATCH tasks are treated as perpetual CPU hogs:
_
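For readers unfamiliar with the lock->owner encoding documented in the comment block above, here is a minimal user-space sketch (not part of the patch; "struct task" and the unprefixed macro names are hypothetical stand-ins for struct task_struct and the RT_MUTEX_* constants) showing how the two low bits of an aligned owner pointer carry the "pending owner" and "has waiters" state, and how rt_mutex_owner()-style masking recovers the plain pointer:

	/* illustration only -- mirrors the bit layout described above */
	#include <assert.h>
	#include <stdio.h>

	#define OWNER_PENDING	1UL	/* like RT_MUTEX_OWNER_PENDING (bit 0) */
	#define HAS_WAITERS	2UL	/* like RT_MUTEX_HAS_WAITERS   (bit 1) */
	#define MASKALL		3UL	/* like RT_MUTEX_OWNER_MASKALL        */

	struct task { int prio; };	/* stand-in for struct task_struct */

	static struct task *owner_task(unsigned long word)
	{
		/* strip both state bits, as rt_mutex_owner() does */
		return (struct task *)(word & ~MASKALL);
	}

	int main(void)
	{
		static struct task t = { .prio = 10 }; /* aligned, low bits are 0 */
		unsigned long word;

		/* top waiter was woken but has not taken the lock yet ... */
		word = (unsigned long)&t | OWNER_PENDING;
		/* ... and more waiters are still queued on the lock */
		word |= HAS_WAITERS;

		assert(owner_task(word) == &t);
		printf("owner=%p pending=%lu waiters=%lu\n",
		       (void *)owner_task(word),
		       word & OWNER_PENDING, (word & HAS_WAITERS) >> 1);
		return 0;
	}

Because the fast cmpxchg-based acquire/release compares the whole word, it can only succeed while both bits are clear, which is exactly the "lock is free" / "lock is held, no waiters" rows of the state table.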