===== arch/ia64/Kconfig 1.48 vs edited =====
--- 1.48/arch/ia64/Kconfig	Wed Oct 15 01:35:33 2003
+++ edited/arch/ia64/Kconfig	Mon Dec 8 14:36:26 2003
@@ -661,6 +661,15 @@
 	  and restore instructions.  It's useful for tracking down spinlock
 	  problems, but slow!  If you're unsure, select N.
 
+config IA64_ALLOW_NMI
+	bool "Allow non-maskable interrupts"
+	help
+	  The normal ia64 irq enable/disable code prevents even non-maskable
+	  interrupts from occurring, which can be a problem for kernel
+	  debuggers, watchdogs, and profilers.  Say Y here if you're interested
+	  in NMIs and don't mind the small performance penalty this option
+	  imposes.
+
 config DEBUG_INFO
 	bool "Compile the kernel with debug info"
 	depends on DEBUG_KERNEL
===== include/asm-ia64/gcc_intrin.h 1.2 vs edited =====
--- 1.2/include/asm-ia64/gcc_intrin.h	Tue Aug 19 23:13:39 2003
+++ edited/include/asm-ia64/gcc_intrin.h	Mon Dec 8 14:33:39 2003
@@ -572,6 +572,14 @@
 	}							\
 })
 
+#ifdef CONFIG_IA64_ALLOW_NMI
+#define ia64_intrin_local_irq_restore(x)			\
+do {								\
+	asm volatile ("mov cr.tpr=%0;;"				\
+		      "srlz.d"					\
+		      :: "r"((x)) : "memory");			\
+} while (0)
+#else
 #define ia64_intrin_local_irq_restore(x)			\
 do {								\
 	asm volatile ("	cmp.ne p6,p7=%0,r0;;"			\
@@ -580,5 +588,6 @@
 		      "(p6) srlz.d"				\
 		      :: "r"((x)) : "p6", "p7", "memory");	\
 } while (0)
+#endif
 
 #endif /* _ASM_IA64_GCC_INTRIN_H */
===== include/asm-ia64/system.h 1.42 vs edited =====
--- 1.42/include/asm-ia64/system.h	Tue Aug 19 23:13:40 2003
+++ edited/include/asm-ia64/system.h	Mon Dec 8 14:33:19 2003
@@ -111,6 +111,56 @@
  */
 
 /* For spinlocks etc */
+#ifdef CONFIG_IA64_ALLOW_NMI
+
+#define IA64_TPR_MMI_BIT	(1UL<<16)
+#define IA64_TPR_MMI_MASK	0xfffffffffffeffffUL
+
+#define __local_irq_save(x)				\
+do {							\
+	(x) = ia64_getreg(_IA64_REG_CR_TPR);		\
+	ia64_stop();					\
+	(x) = (x) | IA64_TPR_MMI_BIT;			\
+	ia64_stop();					\
+	ia64_setreg(_IA64_REG_CR_TPR, (x));		\
+	ia64_srlz_d();					\
+} while (0)
+
+#define __local_irq_disable()				\
+do {							\
+	u64 __tpr;					\
+	ia64_stop();					\
+	__tpr = ia64_getreg(_IA64_REG_CR_TPR);		\
+	ia64_stop();					\
+	__tpr = __tpr | IA64_TPR_MMI_BIT;		\
+	ia64_setreg(_IA64_REG_CR_TPR, __tpr);		\
+	ia64_srlz_d();					\
+} while (0)
+
+#define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x))
+
+#define local_irq_enable()				\
+do {							\
+	u64 __tpr;					\
+	__tpr = ia64_getreg(_IA64_REG_CR_TPR);		\
+	ia64_stop();					\
+	__tpr &= IA64_TPR_MMI_MASK;			\
+	ia64_stop();					\
+	ia64_setreg(_IA64_REG_CR_TPR, __tpr);		\
+	ia64_srlz_d();					\
+} while (0)
+
+#define local_save_flags(flags)	((flags) = ia64_getreg(_IA64_REG_CR_TPR))
+
+#define irqs_disabled()					\
+({							\
+	unsigned long __ia64_id_flags;			\
+	local_save_flags(__ia64_id_flags);		\
+	(__ia64_id_flags & IA64_TPR_MMI_BIT) != 0;	\
+})
+
+#else /* !CONFIG_IA64_ALLOW_NMI */
+
 /* clearing psr.i is implicitly serialized (visible by next insn) */
 /* setting psr.i requires data serialization */
 #define __local_irq_save(x)				\
@@ -128,6 +178,18 @@
 
 #define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
 
+#define local_irq_enable()	({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+#define local_save_flags(flags)	((flags) = ia64_getreg(_IA64_REG_PSR))
+
+#define irqs_disabled()				\
+({						\
+	unsigned long __ia64_id_flags;		\
+	local_save_flags(__ia64_id_flags);	\
+	(__ia64_id_flags & IA64_PSR_I) == 0;	\
+})
+
+#endif /* CONFIG_IA64_ALLOW_NMI */
+
 #ifdef CONFIG_IA64_DEBUG_IRQ
 
   extern unsigned long last_cli_ip;
@@ -161,16 +223,6 @@
 # define local_irq_disable()	__local_irq_disable()
 # define local_irq_restore(x)	__local_irq_restore(x)
 #endif /* !CONFIG_IA64_DEBUG_IRQ */
-
-#define local_irq_enable()	({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
-#define local_save_flags(flags)	((flags) = ia64_getreg(_IA64_REG_PSR))
-
-#define irqs_disabled()				\
-({						\
-	unsigned long __ia64_id_flags;		\
-	local_save_flags(__ia64_id_flags);	\
-	(__ia64_id_flags & IA64_PSR_I) == 0;	\
-})
 
 #ifdef __KERNEL__
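
Not part of the patch, just an illustration: the sketch below models the TPR.mmi bookkeeping that the CONFIG_IA64_ALLOW_NMI variants above depend on, using nothing but bit arithmetic on an ordinary 64-bit variable, so it can be compiled and run on any host. It assumes a 64-bit unsigned long (as on ia64), and the model_* helpers are made-up names standing in for the real macros, which read and write cr.tpr through ia64_getreg()/ia64_setreg() and serialize with ia64_stop()/ia64_srlz_d().

/*
 * Host-side sketch of the TPR.mmi bookkeeping used when
 * CONFIG_IA64_ALLOW_NMI is enabled.  This models only the bit
 * arithmetic; the real macros access the cr.tpr control register
 * and need data serialization after each write.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* As in the patch; assumes a 64-bit unsigned long, as on ia64. */
#define IA64_TPR_MMI_BIT	(1UL << 16)
#define IA64_TPR_MMI_MASK	0xfffffffffffeffffUL	/* ~IA64_TPR_MMI_BIT */

static uint64_t tpr;	/* stand-in for cr.tpr */

/* __local_irq_save(): remember the old value, then set TPR.mmi. */
static uint64_t model_irq_save(void)
{
	uint64_t flags = tpr;

	tpr = flags | IA64_TPR_MMI_BIT;
	return flags;
}

/* local_irq_enable(): clear TPR.mmi, leaving the other TPR fields alone. */
static void model_irq_enable(void)
{
	tpr &= IA64_TPR_MMI_MASK;
}

/* __local_irq_restore(): write the saved value straight back. */
static void model_irq_restore(uint64_t flags)
{
	tpr = flags;
}

/* irqs_disabled(): interrupts are off iff TPR.mmi is set. */
static int model_irqs_disabled(void)
{
	return (tpr & IA64_TPR_MMI_BIT) != 0;
}

int main(void)
{
	uint64_t flags;

	/* The mask must clear exactly the mmi bit (bit 16). */
	assert(IA64_TPR_MMI_MASK == ~IA64_TPR_MMI_BIT);

	tpr = 0x30;			/* pretend the rest of TPR is non-zero */
	flags = model_irq_save();	/* disable: mmi goes to 1 */
	assert(model_irqs_disabled());

	model_irq_restore(flags);	/* restore: back to the saved state */
	assert(!model_irqs_disabled());

	model_irq_save();
	model_irq_enable();		/* enable: mmi cleared, rest preserved */
	assert(!model_irqs_disabled() && tpr == 0x30);

	printf("TPR.mmi model behaves as expected\n");
	return 0;
}

Note that local_irq_enable() only clears the mmi bit rather than writing zero to TPR, so whatever else is in the register (e.g. the mic priority field) is preserved, and __local_irq_restore() simply writes the saved value straight back via mov cr.tpr=%0.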