From: Ingo Molnar

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Ingo Molnar
Acked-by: Anil S Keshavamurthy
Signed-off-by: Andrew Morton
---

 arch/powerpc/kernel/kprobes.c |    4 ++--
 arch/x86_64/kernel/kprobes.c  |    4 ++--
 include/linux/kprobes.h       |    3 ++-
 kernel/kprobes.c              |   14 +++++++-------
 4 files changed, 13 insertions(+), 12 deletions(-)

diff -puN arch/powerpc/kernel/kprobes.c~sem2mutex-kprobes arch/powerpc/kernel/kprobes.c
--- 25/arch/powerpc/kernel/kprobes.c~sem2mutex-kprobes	Fri Jan 13 16:12:56 2006
+++ 25-akpm/arch/powerpc/kernel/kprobes.c	Fri Jan 13 16:12:56 2006
@@ -82,9 +82,9 @@ void __kprobes arch_disarm_kprobe(struct
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 }
 
 static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
diff -puN arch/x86_64/kernel/kprobes.c~sem2mutex-kprobes arch/x86_64/kernel/kprobes.c
--- 25/arch/x86_64/kernel/kprobes.c~sem2mutex-kprobes	Fri Jan 13 16:12:56 2006
+++ 25-akpm/arch/x86_64/kernel/kprobes.c	Fri Jan 13 16:12:56 2006
@@ -222,9 +222,9 @@ void __kprobes arch_disarm_kprobe(struct
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 }
 
 static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff -puN include/linux/kprobes.h~sem2mutex-kprobes include/linux/kprobes.h
--- 25/include/linux/kprobes.h~sem2mutex-kprobes	Fri Jan 13 16:12:56 2006
+++ 25-akpm/include/linux/kprobes.h	Fri Jan 13 16:12:56 2006
@@ -36,6 +36,7 @@
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_KPROBES
 #include <asm/kprobes.h>
@@ -152,7 +153,7 @@ struct kretprobe_instance {
 };
 
 extern spinlock_t kretprobe_lock;
-extern struct semaphore kprobe_mutex;
+extern struct mutex kprobe_mutex;
 extern int arch_prepare_kprobe(struct kprobe *p);
 extern void arch_arm_kprobe(struct kprobe *p);
 extern void arch_disarm_kprobe(struct kprobe *p);
diff -puN kernel/kprobes.c~sem2mutex-kprobes kernel/kprobes.c
--- 25/kernel/kprobes.c~sem2mutex-kprobes	Fri Jan 13 16:12:56 2006
+++ 25-akpm/kernel/kprobes.c	Fri Jan 13 16:12:56 2006
@@ -48,7 +48,7 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-DECLARE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
+DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
@@ -477,7 +477,7 @@ static int __kprobes __register_kprobe(s
 	}
 
 	p->nmissed = 0;
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
@@ -494,7 +494,7 @@ static int __kprobes __register_kprobe(s
 	arch_arm_kprobe(p);
 
 out:
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 
 	if (ret && probed_mod)
 		module_put(probed_mod);
@@ -513,10 +513,10 @@ void __kprobes unregister_kprobe(struct
 	struct kprobe *old_p, *list_p;
 	int cleanup_p;
 
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (unlikely(!old_p)) {
-		up(&kprobe_mutex);
+		mutex_unlock(&kprobe_mutex);
 		return;
 	}
 	if (p != old_p) {
@@ -524,7 +524,7 @@ void __kprobes unregister_kprobe(struct
 			if (list_p == p)
 			/* kprobe p is a valid probe */
 				goto valid_p;
-		up(&kprobe_mutex);
+		mutex_unlock(&kprobe_mutex);
 		return;
 	}
 valid_p:
@@ -540,7 +540,7 @@ valid_p:
 		cleanup_p = 0;
 	}
 
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 
 	synchronize_sched();
 	if (p->mod_refcounted &&
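
For reference, the locking pattern this conversion produces is sketched below;
the function and mutex names are illustrative only and do not appear in the
patch:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);	/* replaces DECLARE_MUTEX(example_mutex) */

	static void example_critical_section(void)
	{
		mutex_lock(&example_mutex);	/* was down(&example_mutex) */
		/* ... work serialized against other holders of the mutex ... */
		mutex_unlock(&example_mutex);	/* was up(&example_mutex) */
	}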