From: Ingo Molnar

Introduce trylock_kernel(), to be used by the early init code to
acquire the BKL in an atomic way.

Signed-off-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 include/linux/smp_lock.h |    1 +
 init/main.c              |   13 ++++++++-----
 lib/kernel_lock.c        |   34 ++++++++++++++++++++++++++++++++++
 3 files changed, 43 insertions(+), 5 deletions(-)

diff -puN include/linux/smp_lock.h~add-trylock_kernel include/linux/smp_lock.h
--- devel/include/linux/smp_lock.h~add-trylock_kernel	2006-01-18 18:51:58.000000000 -0800
+++ devel-akpm/include/linux/smp_lock.h	2006-01-18 18:51:58.000000000 -0800
@@ -39,6 +39,7 @@ static inline int reacquire_kernel_lock(
 }
 
 extern void __lockfunc lock_kernel(void)	__acquires(kernel_lock);
+extern int __lockfunc trylock_kernel(void);
 extern void __lockfunc unlock_kernel(void)	__releases(kernel_lock);
 
 #else
diff -puN init/main.c~add-trylock_kernel init/main.c
--- devel/init/main.c~add-trylock_kernel	2006-01-18 18:51:58.000000000 -0800
+++ devel-akpm/init/main.c	2006-01-18 18:51:58.000000000 -0800
@@ -442,11 +442,14 @@ asmlinkage void __init start_kernel(void
 {
 	char * command_line;
 	extern struct kernel_param __start___param[], __stop___param[];
-/*
- * Interrupts are still disabled. Do necessary setups, then
- * enable them
- */
-	lock_kernel();
+
+	/*
+	 * Interrupts are still disabled. Do necessary setups, then
+	 * enable them. This is the first time we take the BKL, so
+	 * it must succeed:
+	 */
+	if (!trylock_kernel())
+		WARN_ON(1);
 	page_address_init();
 	printk(KERN_NOTICE);
 	printk(linux_banner);
diff -puN lib/kernel_lock.c~add-trylock_kernel lib/kernel_lock.c
--- devel/lib/kernel_lock.c~add-trylock_kernel	2006-01-18 18:51:58.000000000 -0800
+++ devel-akpm/lib/kernel_lock.c	2006-01-18 18:51:58.000000000 -0800
@@ -76,6 +76,23 @@ void __lockfunc lock_kernel(void)
 	task->lock_depth = depth;
 }
 
+int __lockfunc trylock_kernel(void)
+{
+	struct task_struct *task = current;
+	int depth = task->lock_depth + 1;
+
+	if (likely(!depth)) {
+		if (unlikely(down_trylock(&kernel_sem)))
+			return 0;
+		else
+			__acquire(kernel_sem);
+	}
+
+	task->lock_depth = depth;
+	return 1;
+}
+
+
 void __lockfunc unlock_kernel(void)
 {
 	struct task_struct *task = current;
@@ -194,6 +211,22 @@ void __lockfunc lock_kernel(void)
 	current->lock_depth = depth;
 }
 
+int __lockfunc trylock_kernel(void)
+{
+	struct task_struct *task = current;
+	int depth = task->lock_depth + 1;
+
+	if (likely(!depth)) {
+		if (unlikely(!spin_trylock(&kernel_flag)))
+			return 0;
+		else
+			__acquire(kernel_flag);
+	}
+
+	task->lock_depth = depth;
+	return 1;
+}
+
 void __lockfunc unlock_kernel(void)
 {
 	BUG_ON(current->lock_depth < 0);
@@ -204,5 +237,6 @@ void __lockfunc unlock_kernel(void)
 
 #endif
 
 EXPORT_SYMBOL(lock_kernel);
+/* we do not export trylock_kernel(). BKL code should shrink :-) */
 EXPORT_SYMBOL(unlock_kernel);
_