From: Ingo Molnar

RCU task-struct freeing can call free_uid(), which is taking
uidhash_lock - while other users of uidhash_lock are softirq-unsafe.
This bug was found by the lock validator I'm working on:

============================
[ BUG: illegal lock usage! ]
----------------------------
illegal {enabled-softirqs} -> {used-in-softirq} usage.
swapper/0 [HC0[0]:SC1[2]:HE1:SE0] takes {uidhash_lock [u:25]}, at:
 [] _atomic_dec_and_lock+0x48/0x80
{enabled-softirqs} state was registered at:
 [] _write_unlock_bh+0xd/0x10
hardirqs last enabled at: [] kmem_cache_free+0x78/0xb0
softirqs last enabled at: [] copy_process+0x2ca/0xe80

other info that might help in debugging this:
------------------------------
| showing all locks held by:
| (swapper/0 [c30d8790, 140]):
------------------------------

 [] show_trace+0xd/0x10
 [] dump_stack+0x17/0x20
 [] print_usage_bug+0x1e1/0x200
 [] mark_lock+0x259/0x290
 [] debug_lock_chain_spin+0x465/0x10f0
 [] _raw_spin_lock+0x2d/0x90
 [] _spin_lock+0x8/0x10
 [] _atomic_dec_and_lock+0x48/0x80
 [] free_uid+0x14/0x70
 [] __put_task_struct+0x38/0x130
 [] __put_task_struct_cb+0xd/0x10
 [] __rcu_process_callbacks+0x81/0x180
 [] rcu_process_callbacks+0x30/0x60
 [] tasklet_action+0x54/0xd0
 [] __do_softirq+0x87/0x120
 [] do_softirq+0x69/0xb0
 =======================
 [] irq_exit+0x39/0x50
 [] smp_apic_timer_interrupt+0x4c/0x50
 [] apic_timer_interrupt+0x27/0x2c
 [] cpu_idle+0x68/0x80
 [] start_secondary+0x29e/0x480
 [<00000000>] 0x0
 [] 0xc30d9fb4

The fix is to always take the uidhash_lock spinlock in a softirq-safe
manner.

Signed-off-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 kernel/user.c |   25 +++++++++++++++++--------
 1 files changed, 17 insertions(+), 8 deletions(-)
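A minimal sketch of the deadlock class being flagged, reconstructed
from the trace above (not part of the patch itself): process context
takes uidhash_lock with softirqs enabled, a timer interrupt arrives on
the same CPU, and the RCU tasklet run from irq_exit() then spins on
the lock that its own CPU already holds:

	/* process context, softirqs enabled: */
	spin_lock(&uidhash_lock);
	...
	/* timer irq hits; pending softirqs run from irq_exit(): */
	tasklet_action()
	  -> __rcu_process_callbacks()
	    -> __put_task_struct() -> free_uid()
	      -> atomic_dec_and_lock(&up->__count, &uidhash_lock)
	         /* spins forever: this CPU already holds uidhash_lock */

Taking the lock with spin_lock_bh() (or with BHs otherwise disabled)
in process context closes the window: the softirq cannot run on this
CPU while the lock is held.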
diff -puN kernel/user.c~fix-uidhash_lock-rcu-deadlock kernel/user.c
--- devel/kernel/user.c~fix-uidhash_lock-rcu-deadlock	2006-01-25 11:28:04.000000000 -0800
+++ devel-akpm/kernel/user.c	2006-01-25 11:28:04.000000000 -0800
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
@@ -27,6 +28,12 @@
 static kmem_cache_t *uid_cachep;
 
 static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ */
 static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
@@ -83,14 +90,15 @@
 struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
+	local_bh_disable();
 	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		key_put(up->uid_keyring);
@@ -98,6 +106,7 @@ void free_uid(struct user_struct *up)
 		kmem_cache_free(uid_cachep, up);
 		spin_unlock(&uidhash_lock);
 	}
+	local_bh_enable();
 }
 
 struct user_struct * alloc_uid(uid_t uid)
 {
@@ -105,9 +114,9 @@ struct user_struct * alloc_uid(uid_t uid
 	struct list_head *hashent = uidhashentry(uid);
 	struct user_struct *up;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -137,7 +146,7 @@ struct user_struct * alloc_uid(uid_t uid
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_bh(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -147,7 +156,7 @@ struct user_struct * alloc_uid(uid_t uid
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
+		spin_unlock_bh(&uidhash_lock);
 	}
 
 	return up;
@@ -183,9 +192,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	return 0;
 }
_
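A note on the free_uid() hunks: spin_lock_bh() cannot be used there,
because free_uid() does not take the lock itself - the lock is
acquired inside atomic_dec_and_lock(), which uses a plain spin_lock()
internally. Bracketing the whole sequence with local_bh_disable()/
local_bh_enable() makes it softirq-safe all the same. A minimal sketch
of the pattern, with illustrative names ('refcnt', 'lock') that are
not from the patch:

	local_bh_disable();		/* block softirqs on this CPU */
	if (atomic_dec_and_lock(&refcnt, &lock)) {
		/* refcount hit zero and 'lock' is now held; the plain
		 * spin_lock() inside atomic_dec_and_lock() was
		 * softirq-safe only because BHs are disabled here */
		/* ... tear the object down ... */
		spin_unlock(&lock);	/* plain unlock, BHs still off */
	}
	local_bh_enable();		/* may now run pending softirqs */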