From: Keshavamurthy Anil S

Based on some feedback from Oleg Nesterov, I have made a few changes to
the previously posted patch.

Signed-off-by: Anil S Keshavamurthy
Signed-off-by: Andrew Morton
---

 arch/powerpc/kernel/kprobes.c |    3 ---
 arch/x86_64/kernel/kprobes.c  |    4 ++--
 kernel/kprobes.c              |   32 ++++++++++++++++++--------------
 3 files changed, 20 insertions(+), 19 deletions(-)

diff -puN arch/powerpc/kernel/kprobes.c~kprobes-changed-from-using-spinlock-to-mutex-fix arch/powerpc/kernel/kprobes.c
--- 25/arch/powerpc/kernel/kprobes.c~kprobes-changed-from-using-spinlock-to-mutex-fix	Wed Dec 14 16:11:26 2005
+++ 25-akpm/arch/powerpc/kernel/kprobes.c	Wed Dec 14 16:11:26 2005
@@ -35,7 +35,6 @@
 #include
 #include
 
-static DECLARE_MUTEX(kprobe_mutex);
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
@@ -54,9 +53,7 @@ int __kprobes arch_prepare_kprobe(struct
 
 	/* insn must be on a special executable page on ppc64 */
 	if (!ret) {
-		down(&kprobe_mutex);
 		p->ainsn.insn = get_insn_slot();
-		up(&kprobe_mutex);
 		if (!p->ainsn.insn)
 			ret = -ENOMEM;
 	}
diff -puN arch/x86_64/kernel/kprobes.c~kprobes-changed-from-using-spinlock-to-mutex-fix arch/x86_64/kernel/kprobes.c
--- 25/arch/x86_64/kernel/kprobes.c~kprobes-changed-from-using-spinlock-to-mutex-fix	Wed Dec 14 16:11:26 2005
+++ 25-akpm/arch/x86_64/kernel/kprobes.c	Wed Dec 14 16:11:26 2005
@@ -43,7 +43,7 @@
 #include
 
 void jprobe_return_end(void);
-void __kprobes arch_copy_kprobe(struct kprobe *p);
+static void __kprobes arch_copy_kprobe(struct kprobe *p);
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -180,7 +180,7 @@ static inline s32 *is_riprel(u8 *insn)
 	return NULL;
 }
 
-void __kprobes arch_copy_kprobe(struct kprobe *p)
+static void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
 	s32 *ripdisp;
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
diff -puN kernel/kprobes.c~kprobes-changed-from-using-spinlock-to-mutex-fix kernel/kprobes.c
--- 25/kernel/kprobes.c~kprobes-changed-from-using-spinlock-to-mutex-fix	Wed Dec 14 16:11:26 2005
+++ 25-akpm/kernel/kprobes.c	Wed Dec 14 16:11:26 2005
@@ -431,7 +431,7 @@ static int __kprobes register_aggr_kprob
 		copy_kprobe(old_p, p);
 		ret = add_new_kprobe(old_p, p);
 	} else {
-		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+		ap = kcalloc(1, sizeof(struct kprobe), GFP_KERNEL);
 		if (!ap)
 			return -ENOMEM;
 		add_aggr_kprobe(ap, old_p);
@@ -491,7 +491,8 @@ out:
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
 	struct module *mod;
-	struct kprobe *old_p, *cleanup_p;
+	struct kprobe *old_p, *list_p;
+	int cleanup_p;
 
 	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
@@ -499,22 +500,25 @@ void __kprobes unregister_kprobe(struct
 		up(&kprobe_mutex);
 		return;
 	}
-
-	if ((old_p->pre_handler == aggr_pre_handler) &&
+	if (p != old_p) {
+		list_for_each_entry_rcu(list_p, &old_p->list, list)
+			if (list_p == p)
+			/* kprobe p is a valid probe */
+				goto valid_p;
+		up(&kprobe_mutex);
+		return;
+	}
+valid_p:
+	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
 		(p->list.next == &old_p->list) &&
-		(p->list.prev == &old_p->list)) {
-		/* Only one element in the aggregate list */
+		(p->list.prev == &old_p->list))) {
+		/* Only probe on the hash list */
 		arch_disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
-		cleanup_p = old_p;
-	} else if (old_p == p) {
-		/* Only one kprobe element in the hash list */
-		arch_disarm_kprobe(p);
-		hlist_del_rcu(&p->hlist);
-		cleanup_p = p;
+		cleanup_p = 1;
 	} else {
 		list_del_rcu(&p->list);
-		cleanup_p = NULL;
+		cleanup_p = 0;
 	}
 
 	up(&kprobe_mutex);
@@ -524,7 +528,7 @@ void __kprobes unregister_kprobe(struct
 		module_put(mod);
 
 	if (cleanup_p) {
-		if (cleanup_p->pre_handler == aggr_pre_handler) {
+		if (p != old_p) {
 			list_del_rcu(&p->list);
 			kfree(old_p);
 		}
_
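
For reference, here is a minimal sketch of a module that exercises the
register_kprobe()/unregister_kprobe() path reworked above.  It is
illustrative only and not part of this patch; the probed symbol
("do_fork"), the handler, and the module names are assumptions chosen
for the example.

/* Illustrative only -- not part of this patch. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Runs just before the probed instruction executes. */
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.pre_handler = handler_pre,
};

static int __init kprobe_sketch_init(void)
{
	int ret;

	/* Resolve the probe address at load time (symbol is illustrative). */
	kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!kp.addr)
		return -EINVAL;

	ret = register_kprobe(&kp);
	if (ret < 0)
		printk(KERN_ERR "register_kprobe failed: %d\n", ret);
	return ret;
}

static void __exit kprobe_sketch_exit(void)
{
	/* Goes through the unregister path modified in kernel/kprobes.c. */
	unregister_kprobe(&kp);
}

module_init(kprobe_sketch_init);
module_exit(kprobe_sketch_exit);
MODULE_LICENSE("GPL");

Unloading such a module while a second probe is registered at the same
address exercises the new list walk in unregister_kprobe(), which now
verifies that p really is on old_p's aggregate list before tearing it
down.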