===== arch/ia64/Kconfig 1.59 vs edited =====
--- 1.59/arch/ia64/Kconfig	Fri Jan 23 17:34:02 2004
+++ edited/arch/ia64/Kconfig	Wed Feb 4 09:54:32 2004
@@ -653,6 +653,15 @@
 	  and restore instructions.  It's useful for tracking down spinlock
 	  problems, but slow!  If you're unsure, select N.
 
+config IA64_ALLOW_NMI
+	bool "Allow non-maskable interrupts"
+	help
+	  The normal ia64 irq enable/disable code prevents even non-maskable
+	  interrupts from occurring, which can be a problem for kernel
+	  debuggers, watchdogs, and profilers.  Say Y here if you're interested
+	  in NMIs and don't mind the small performance penalty this option
+	  imposes.
+
 config DEBUG_INFO
 	bool "Compile the kernel with debug info"
 	depends on DEBUG_KERNEL
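ia64 has no dedicated NMI line; vector 2 (the "NMI" vector) is delivered
whenever psr.i is set, regardless of the mmi bit in cr.tpr.  The trick
behind this option is therefore to mask interrupts by setting cr.tpr.mmi
rather than by clearing psr.i: ordinary device interrupts are held off,
but vector 2 still gets through.  A rough model of the delivery rule in
plain C (the struct and function names here are made up for
illustration; the rule itself is my reading of the Itanium architecture
manual, not code from this patch):

	#include <stdbool.h>

	struct cpu_state {
		bool psr_i;	/* PSR.i: external interrupt enable bit */
		bool tpr_mmi;	/* TPR.mmi: "mask maskable interrupts"  */
	};

	/* Would an external interrupt at `vector' be delivered? */
	static bool delivered(struct cpu_state c, int vector)
	{
		if (!c.psr_i)
			return false;	/* psr.i == 0 holds off everything    */
		if (vector == 2)
			return true;	/* ...but tpr.mmi never masks the NMI */
		return !c.tpr_mmi;	/* other vectors honor tpr.mmi        */
	}

The "small performance penalty" in the help text presumably comes from
the cr.tpr writes needing a srlz.d, where a plain rsm psr.i does not.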
===== arch/ia64/kernel/entry.S 1.55 vs edited =====
--- 1.55/arch/ia64/kernel/entry.S	Mon Jan 26 16:20:58 2004
+++ edited/arch/ia64/kernel/entry.S	Thu Feb 5 11:03:42 2004
@@ -41,6 +41,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -193,7 +194,7 @@
 (p6)	br.cond.dpnt .map
 	;;
 .done:
-(p6)	ssm psr.ic	// if we we had to map, renable the psr.ic bit FIRST!!!
+(p6)	ssm psr.ic	// if we had to map, re-enable the psr.ic bit FIRST!!!
 	;;
 (p6)	srlz.d
 	ld8 sp=[r21]	// load kernel stack pointer of new task
@@ -209,7 +210,7 @@
 	br.ret.sptk.many rp	// boogie on out in new context
 
 .map:
-	rsm psr.ic	// interrupts (psr.i) are already disabled here
+	rsm psr.ic	// interrupts are already masked, but psr.ic must still be cleared around the TLB insert
 	movl r25=PAGE_KERNEL
 	;;
 	srlz.d
@@ -664,9 +665,9 @@
 	 * user- or fsys-mode, hence we disable interrupts early on:
 	 */
#ifdef CONFIG_PREEMPT
-	rsm psr.i	// disable interrupts
+	irq_disable(r20)	// disable interrupts
#else
-(pUStk)	rsm psr.i
+(pUStk)	irq_disable(r20)
#endif
 	cmp.eq pLvSys,p0=r0,r0	// pLvSys=1: leave from syscall
 (pUStk)	cmp.eq.unc p6,p0=r0,r0	// p6 <- pUStk
@@ -777,9 +778,9 @@
 	 * user- or fsys-mode, hence we disable interrupts early on:
 	 */
#ifdef CONFIG_PREEMPT
-	rsm psr.i	// disable interrupts
+	irq_disable(r20)	// disable interrupts
#else
-(pUStk)	rsm psr.i
+(pUStk)	irq_disable(r20)
#endif
 	cmp.eq p0,pLvSys=r0,r0	// pLvSys=0: leave from kernel
 (pUStk)	cmp.eq.unc p6,p0=r0,r0	// p6 <- pUStk
@@ -1048,11 +1049,11 @@
 (pKStk)	dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
 	;;
 (pKStk)	st4 [r20]=r21
-	ssm psr.i	// enable interrupts
+	irq_enable	// enable interrupts
#endif
 	br.call.spnt.many rp=schedule
 .ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1
-	rsm psr.i	// disable interrupts
+	irq_disable(r20)	// disable interrupts
 	;;
#ifdef CONFIG_PREEMPT
 (pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
===== arch/ia64/kernel/fsys.S 1.22 vs edited =====
--- 1.22/arch/ia64/kernel/fsys.S	Sat Nov 15 13:53:22 2003
+++ edited/arch/ia64/kernel/fsys.S	Thu Feb 5 10:48:37 2004
@@ -388,7 +388,7 @@
 	mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
 	;;
 
-	rsm psr.i	// mask interrupt delivery
+	irq_disable(r18)	// mask interrupt delivery (r18 is assumed scratch: it is cleared before the fsys path returns)
 	mov ar.ccv=0
 	andcm r14=r14,r17	// filter out SIGKILL & SIGSTOP
@@ -461,11 +461,10 @@
#ifdef CONFIG_SMP
 	st4.rel [r31]=r0	// release the lock
#endif
-	ssm psr.i
+	irq_enable
 	cmp.ne p9,p0=r8,r0	// check for bad HOW value
 	;;
 
-	srlz.d	// ensure psr.i is set again
 	mov r18=0	// i must not leak kernel bits...
 (p9)	br.spnt.few .fail_einval	// bail out for bad HOW value
@@ -485,17 +484,13 @@
#ifdef CONFIG_SMP
 	st4.rel [r31]=r0	// release the lock
#endif
-	ssm psr.i
-	;;
-	srlz.d
+	irq_enable
 	br.sptk.many fsys_fallback_syscall	// with signal pending, do the heavy-weight syscall
 
#ifdef CONFIG_SMP
 .lock_contention:
 	/* Rather than spinning here, fall back on doing a heavy-weight syscall.  */
-	ssm psr.i
-	;;
-	srlz.d
+	irq_enable
 	br.sptk.many fsys_fallback_syscall
#endif
 END(fsys_rt_sigprocmask)
@@ -597,7 +592,7 @@
 	mov ar.rsc=0x3	// set eager mode, pl 0, little-endian, loadrs=0
 	br.call.sptk.many b7=ia64_syscall_setup
 	;;
-	ssm psr.i
+	irq_enable
 	movl r2=ia64_ret_from_syscall
 	;;
 	mov rp=r2	// set the real return addr
===== include/asm-ia64/gcc_intrin.h 1.2 vs edited =====
--- 1.2/include/asm-ia64/gcc_intrin.h	Tue Aug 19 23:13:39 2003
+++ edited/include/asm-ia64/gcc_intrin.h	Wed Feb 4 09:54:33 2004
@@ -572,6 +572,14 @@
 	}							\
 })
 
+#ifdef CONFIG_IA64_ALLOW_NMI
+#define ia64_intrin_local_irq_restore(x)		\
+do {							\
+	asm volatile ("mov cr.tpr=%0;;"			\
+		      "srlz.d"				\
+		      :: "r"((x)) : "memory");		\
+} while (0)
+#else
 #define ia64_intrin_local_irq_restore(x)		\
 do {							\
 	asm volatile ("	cmp.ne p6,p7=%0,r0;;"		\
@@ -580,5 +588,6 @@
 		      "(p6)	srlz.d"				\
 		      :: "r"((x)) : "p6", "p7", "memory");	\
 } while (0)
+#endif
 
 #endif /* _ASM_IA64_GCC_INTRIN_H */
===== include/asm-ia64/system.h 1.43 vs edited =====
--- 1.43/include/asm-ia64/system.h	Wed Dec 10 17:33:48 2003
+++ edited/include/asm-ia64/system.h	Thu Feb 5 16:51:23 2004
@@ -114,6 +114,62 @@
  */
 
 /* For spinlocks etc */
+#ifdef CONFIG_IA64_ALLOW_NMI
+
+#define IA64_TPR_MMI_BIT	(1UL<<16)
+#define IA64_TPR_MMI_MASK	(~(IA64_TPR_MMI_BIT))
+
+/* (x) must keep the value cr.tpr had *before* the mmi bit is set, */
+/* so that __local_irq_restore() actually unmasks again.           */
+#define __local_irq_save(x)						\
+do {									\
+	(x) = ia64_getreg(_IA64_REG_CR_TPR);				\
+	ia64_stop();							\
+	ia64_setreg(_IA64_REG_CR_TPR, (x) | IA64_TPR_MMI_BIT);		\
+	ia64_srlz_d();							\
+} while (0)
+
+#define __local_irq_disable()						\
+do {									\
+	u64 __tpr;							\
+	ia64_stop();							\
+	__tpr = ia64_getreg(_IA64_REG_CR_TPR);				\
+	ia64_stop();							\
+	__tpr = __tpr | IA64_TPR_MMI_BIT;				\
+	ia64_setreg(_IA64_REG_CR_TPR, __tpr);				\
+	ia64_srlz_d();							\
+} while (0)
+
+#define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x))
+
+#define local_irq_enable()						\
+do {									\
+	u64 __tpr, __psr;						\
+	__tpr = ia64_getreg(_IA64_REG_CR_TPR);				\
+	__psr = ia64_getreg(_IA64_REG_PSR);				\
+	ia64_stop();							\
+	__tpr &= IA64_TPR_MMI_MASK;					\
+	ia64_stop();							\
+	ia64_setreg(_IA64_REG_CR_TPR, __tpr);				\
+	ia64_srlz_d();							\
+	if (!(__psr & IA64_PSR_I)) {					\
+		printk("psr.i was disabled, enabling\n");		\
+		ia64_ssm(IA64_PSR_I);					\
+		ia64_srlz_d();						\
+	}								\
+} while (0)
+
+#define local_save_flags(flags)	((flags) = ia64_getreg(_IA64_REG_CR_TPR))
+
+#define irqs_disabled()					\
+({							\
+	unsigned long __ia64_id_flags;			\
+	local_save_flags(__ia64_id_flags);		\
+	(__ia64_id_flags & IA64_TPR_MMI_BIT) != 0;	\
+})
+
+#else /* !CONFIG_IA64_ALLOW_NMI */
+
 /* clearing psr.i is implicitly serialized (visible by next insn) */
 /* setting psr.i requires data serialization */
 #define __local_irq_save(x)			\
@@ -131,6 +187,18 @@
 
 #define __local_irq_restore(x)	ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
 
+#define local_irq_enable()	({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+#define local_save_flags(flags)	((flags) = ia64_getreg(_IA64_REG_PSR))
+
+#define irqs_disabled()				\
+({						\
+	unsigned long __ia64_id_flags;		\
+	local_save_flags(__ia64_id_flags);	\
+	(__ia64_id_flags & IA64_PSR_I) == 0;	\
+})
+
+#endif /* CONFIG_IA64_ALLOW_NMI */
+
#ifdef CONFIG_IA64_DEBUG_IRQ
 
 extern unsigned long last_cli_ip;
@@ -165,16 +233,6 @@
 # define local_irq_restore(x)	__local_irq_restore(x)
#endif /* !CONFIG_IA64_DEBUG_IRQ */
 
-#define local_irq_enable()	({ ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
-#define local_save_flags(flags)	((flags) = ia64_getreg(_IA64_REG_PSR))
-
-#define irqs_disabled()				\
-({						\
-	unsigned long __ia64_id_flags;		\
-	local_save_flags(__ia64_id_flags);	\
-	(__ia64_id_flags & IA64_PSR_I) == 0;	\
-})
-
#ifdef __KERNEL__
 #define prepare_to_switch()    do { } while(0)
@@ -279,6 +337,28 @@
 #define ia64_platform_is(x)	(strcmp(x, platform_name) == 0)
 
#endif /* __KERNEL__ */
+
+#elif defined(__ASSEMBLY__)
+
+#define IA64_TPR_MMI_BIT	(1<<16)
+
+#ifdef CONFIG_IA64_ALLOW_NMI
+/* NB: these expand to several instructions; a qualifying predicate */
+/* on the call site covers only the first one.                      */
+#define irq_disable(q)	movl q=IA64_TPR_MMI_BIT		\
+			;;				\
+			mov cr.tpr=q			\
+			;;				\
+			srlz.d
+#define irq_enable	mov cr.tpr=r0			\
+			;;				\
+			srlz.d
+#else
+#define irq_disable(q)	rsm psr.i
+#define irq_enable	ssm psr.i			\
+			;;				\
+			srlz.d
+#endif
 
#endif /* __ASSEMBLY__ */
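A note on the save/restore pairing in the system.h hunk above: the flags
value produced by __local_irq_save() must be cr.tpr as it was before the
mmi bit went in, or a nested local_irq_restore() would leave the CPU
masked forever.  A self-contained sketch of that discipline in plain C
(tpr, irq_save and irq_restore are stand-ins for cr.tpr and the real
macros, not kernel code):

	#include <assert.h>

	#define TPR_MMI	(1UL << 16)

	static unsigned long tpr;		/* stands in for cr.tpr */

	static unsigned long irq_save(void)
	{
		unsigned long old = tpr;	/* value before masking */
		tpr |= TPR_MMI;
		return old;
	}

	static void irq_restore(unsigned long old)
	{
		tpr = old;
	}

	int main(void)
	{
		unsigned long f1 = irq_save();	/* outer section: masks   */
		unsigned long f2 = irq_save();	/* nested: already masked */

		irq_restore(f2);
		assert(tpr & TPR_MMI);		/* inner restore: still masked */
		irq_restore(f1);
		assert(!(tpr & TPR_MMI));	/* outer restore: unmasked     */
		return 0;
	}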
===== kernel/printk.c 1.31 vs edited =====
--- 1.31/kernel/printk.c	Sun Jan 18 22:35:45 2004
+++ edited/kernel/printk.c	Wed Feb 4 10:29:51 2004
@@ -494,6 +494,7 @@
 	va_start(args, fmt);
 	printed_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
 	va_end(args);
+	early_printk_sn_sal(printk_buf, printed_len);
 
 	/*
 	 * Copy the output into log_buf.  If the caller didn't provide