i386: add idle notifier

From: Stephane Eranian

Hello,

Here is a patch that adds an idle notifier to the i386 tree. The idle
notifier functionality and implementation are identical to those of the
x86_64 idle notifier. We use the idle notifier in the context of perfmon.

The patch is against Andi Kleen's x86_64-2.6.19-rc6-061128-1.bz2 kernel.
It may apply to other kernels, but it needs some updates to poll_idle()
and default_idle() to work correctly there.

changelog:
	- add an idle notifier mechanism to the i386 tree

Signed-off-by: Stephane Eranian
Signed-off-by: Andi Kleen

---
 arch/i386/kernel/apic.c          |    4 +++
 arch/i386/kernel/cpu/mcheck/p4.c |    2 +
 arch/i386/kernel/irq.c           |    3 ++
 arch/i386/kernel/process.c       |   43 +++++++++++++++++++++++++++++++++++++++
 arch/i386/kernel/smp.c           |    2 +
 include/asm-i386/idle.h          |   14 ++++++++++++
 6 files changed, 68 insertions(+)

Index: linux/arch/i386/kernel/apic.c
===================================================================
--- linux.orig/arch/i386/kernel/apic.c
+++ linux/arch/i386/kernel/apic.c
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include <asm/idle.h>
 #include
 #include
@@ -1255,6 +1256,7 @@ fastcall void smp_apic_timer_interrupt(s
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
+	exit_idle();
 	irq_enter();
 	smp_local_timer_interrupt();
 	irq_exit();
@@ -1305,6 +1307,7 @@ fastcall void smp_spurious_interrupt(str
 {
 	unsigned long v;
 
+	exit_idle();
 	irq_enter();
 	/*
 	 * Check if this really is a spurious interrupt and ACK it
@@ -1329,6 +1332,7 @@ fastcall void smp_error_interrupt(struct
 {
 	unsigned long v, v1;
 
+	exit_idle();
 	irq_enter();
 	/* First tickle the hardware, only then report what went on. -- REW */
 	v = apic_read(APIC_ESR);
Index: linux/arch/i386/kernel/cpu/mcheck/p4.c
===================================================================
--- linux.orig/arch/i386/kernel/cpu/mcheck/p4.c
+++ linux/arch/i386/kernel/cpu/mcheck/p4.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include <asm/idle.h>
 #include
@@ -59,6 +60,7 @@ static void (*vendor_thermal_interrupt)(
 
 fastcall void smp_thermal_interrupt(struct pt_regs *regs)
 {
+	exit_idle();
 	irq_enter();
 	vendor_thermal_interrupt(regs);
 	irq_exit();
Index: linux/arch/i386/kernel/irq.c
===================================================================
--- linux.orig/arch/i386/kernel/irq.c
+++ linux/arch/i386/kernel/irq.c
@@ -19,6 +19,8 @@
 #include
 #include
 
+#include <asm/idle.h>
+
 DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
 EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -61,6 +63,7 @@ fastcall unsigned int do_IRQ(struct pt_r
 	union irq_ctx *curctx, *irqctx;
 	u32 *isp;
 #endif
+	exit_idle();
 
 	if (unlikely((unsigned)irq >= NR_IRQS)) {
 		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
Index: linux/arch/i386/kernel/process.c
===================================================================
--- linux.orig/arch/i386/kernel/process.c
+++ linux/arch/i386/kernel/process.c
@@ -48,6 +48,7 @@
 #include
 #include
 #include
+#include <asm/idle.h>
 #ifdef CONFIG_MATH_EMULATION
 #include
 #endif
@@ -80,6 +81,46 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL(idle_notifier_unregister);
+
+static DEFINE_PER_CPU(volatile unsigned long, idle_state);
+
+void enter_idle(void)
+{
+	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
+	__set_bit(0, &__get_cpu_var(idle_state));
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+void __exit_idle(void)
+{
+	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
+	if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
+		return;
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+	/* idle loop has pid 0 */
+	if (current->pid)
+		return;
+	__exit_idle();
+}
+
 void disable_hlt(void)
 {
 	hlt_counter++;
@@ -184,7 +225,9 @@ void cpu_idle(void)
 				play_dead();
 
 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
+			enter_idle();
 			idle();
+			__exit_idle();
 		}
 		preempt_enable_no_resched();
 		schedule();
Index: linux/arch/i386/kernel/smp.c
===================================================================
--- linux.orig/arch/i386/kernel/smp.c
+++ linux/arch/i386/kernel/smp.c
@@ -23,6 +23,7 @@
 #include
 #include
+#include <asm/idle.h>
 #include
 
 /*
@@ -629,6 +630,7 @@ fastcall void smp_call_function_interrup
 	/*
 	 * At this point the info structure may be out of scope unless wait==1
 	 */
+	exit_idle();
 	irq_enter();
 	(*func)(info);
 	irq_exit();
Index: linux/include/asm-i386/idle.h
===================================================================
--- /dev/null
+++ linux/include/asm-i386/idle.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_I386_IDLE_H
+#define _ASM_I386_IDLE_H 1
+
+#define IDLE_START 1
+#define IDLE_END 2
+
+struct notifier_block;
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+
+void exit_idle(void);
+void enter_idle(void);
+
+#endif
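
For reviewers who have not seen the x86_64 counterpart, here is a minimal
sketch of how a client such as perfmon could consume this interface. It is
illustrative only and not part of the patch; the idle_probe_* names are
invented for the example. Because the chain is an atomic notifier and
exit_idle() may be reached from hard interrupt context, the callback must
not sleep and should only touch per-CPU state.

/* Illustrative only -- not part of this patch. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <asm/idle.h>

static DEFINE_PER_CPU(unsigned long, idle_entries);

/* Runs atomically from the idle loop / interrupt path: must not sleep. */
static int idle_probe_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	if (action == IDLE_START)
		__get_cpu_var(idle_entries)++;
	/* IDLE_END is where per-CPU hardware state (e.g. the PMU) would be restored */
	return NOTIFY_OK;
}

static struct notifier_block idle_probe_nb = {
	.notifier_call = idle_probe_notify,
};

static int __init idle_probe_init(void)
{
	idle_notifier_register(&idle_probe_nb);
	return 0;
}

static void __exit idle_probe_exit(void)
{
	idle_notifier_unregister(&idle_probe_nb);
}

module_init(idle_probe_init);
module_exit(idle_probe_exit);
MODULE_LICENSE("GPL");

Loading such a module simply counts idle entries per CPU; IDLE_END is the
natural point for a subsystem to restore per-CPU hardware state it parked
while the CPU was idle, which matches the perfmon use case mentioned above.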