GIT 32cc8f7ab71e672849ebc0e8d8d3da9ae3aea376 git://www.linux-mips.org/pub/scm/upstream.git#for-akpm

commit 32cc8f7ab71e672849ebc0e8d8d3da9ae3aea376
Author: Ralf Baechle
Date:   Wed Aug 8 18:58:32 2007 +0100

    [MIPS] SMTC: Microoptimize atomic_postincrement for non-weak consistency.

    Signed-off-by: Ralf Baechle

commit 465a25f6f7d85bf5b0a418d901fb48092f9a0c92
Author: Yoichi Yuasa
Date:   Tue Aug 7 00:09:17 2007 +0900

    [MIPS] vr41xx: add cpu_wait

    Add cpu_wait for NEC VR41xx

    Signed-off-by: Yoichi Yuasa
    Signed-off-by: Ralf Baechle

commit 7b2705f99cda70f1a209cd3b3a332698fc42b72a
Author: Kevin D. Kissell
Date:   Fri Aug 3 19:38:03 2007 +0200

    IRQ Affinity Support for SMTC on Malta Platform

    Signed-off-by: Kevin D. Kissell
    Signed-off-by: Ralf Baechle

 arch/mips/Kconfig                        |   13 ++++++
 arch/mips/kernel/i8259.c                 |    3 +
 arch/mips/kernel/smtc.c                  |   65 +++++++++++++++++++++++++++++-
 arch/mips/mips-boards/malta/malta_smtc.c |   51 +++++++++++++++++++++++
 arch/mips/vr41xx/common/pmu.c            |   18 +++++++-
 include/asm-mips/irq.h                   |   66 +++++++++++++++++++++++++++++-
 include/asm-mips/smtc_ipi.h              |    1
 7 files changed, 211 insertions(+), 6 deletions(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3b404b7..429db22 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1385,6 +1385,19 @@ config MIPS_MT_SMTC_IM_BACKSTOP
 	  impact on interrupt service overhead.  Disable it only if you know
 	  what you are doing.
 
+config MIPS_MT_SMTC_IRQAFF
+	bool "Support IRQ affinity API"
+	depends on MIPS_MT_SMTC
+	default n
+	help
+	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
+	  for SMTC Linux kernel.  Requires platform support, of which
+	  an example can be found in the MIPS kernel i8259 and Malta
+	  platform code.  It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
+	  be enabled if MIPS_MT_SMTC_IRQAFF is used.  Adds overhead to
+	  interrupt dispatch, and should be used only if you know what
+	  you are doing.
+
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
 	depends on MIPS_VPE_LOADER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 2345160..a2bd93e 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -38,6 +38,9 @@ static struct irq_chip i8259A_chip = {
 	.mask		= disable_8259A_irq,
 	.unmask		= enable_8259A_irq,
 	.mask_ack	= mask_and_ack_8259A,
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	.set_affinity	= plat_set_irq_affinity,
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 };
 
 /*
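The .set_affinity hook added to i8259A_chip above is what the generic IRQ layer
invokes when a new mask is written through /proc/irq/*/smp_affinity, the API the
Kconfig help text refers to.  A minimal userspace sketch of that usage follows;
the IRQ number 10 and the mask value are illustrative only, not taken from the
patch:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/irq/10/smp_affinity", "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* Hex CPU mask: bit 0 = CPU0.  "1" pins the IRQ to CPU0, which on
	 * Malta is the one target guaranteed to survive the VPE0 filter
	 * applied by plat_set_irq_affinity() below. */
	if (fprintf(f, "1\n") < 0)
		perror("fprintf");
	fclose(f);
	return EXIT_SUCCESS;
}

Masks naming TCs outside VPE0 are silently stripped by the platform hook, so a
VPE0-only mask is the conservative choice on this board.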
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 16aa5d3..897f33e 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -605,6 +605,60 @@
 #endif
 	return setup_irq(irq, new);
 }
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * Support for IRQ affinity to TCs
+ */
+
+void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+	/*
+	 * If a "fast path" cache of quickly decodable affinity state
+	 * is maintained, this is where it gets done, on a call up
+	 * from the platform affinity code.
+	 */
+}
+
+void smtc_forward_irq(unsigned int irq)
+{
+	int target;
+
+	/*
+	 * OK wise guy, now figure out how to get the IRQ
+	 * to be serviced on an authorized "CPU".
+	 *
+	 * Ideally, to handle the situation where an IRQ has multiple
+	 * eligible CPUs, we would maintain state per IRQ that would
+	 * allow a fair distribution of service requests.  Since the
+	 * expected use model is any-or-only-one, for simplicity
+	 * and efficiency, we just pick the easiest one to find.
+	 */
+
+	target = first_cpu(irq_desc[irq].affinity);
+
+	/*
+	 * We depend on the platform code to have correctly processed
+	 * IRQ affinity change requests to ensure that the IRQ affinity
+	 * mask has been purged of bits corresponding to nonexistent and
+	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
+	 * connected to the physical interrupt input for the interrupt
+	 * in question.  Otherwise we have a nasty problem with interrupt
+	 * mask management.  This is best handled in non-performance-critical
+	 * platform IRQ affinity setting code, to minimize interrupt-time
+	 * checks.
+	 */
+
+	/* If no one is eligible, service locally */
+	if (target >= NR_CPUS) {
+		do_IRQ_no_affinity(irq);
+		return;
+	}
+
+	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 /*
  * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
  * Within a VPE one TC can interrupt another by different approaches.
@@ -658,7 +712,7 @@ static __inline__ int atomic_postincrement
 	"	addu	%1, %0, 1				\n"
 	"	sc	%1, %2					\n"
 	"	beqz	%1, 1b					\n"
-	"	sync						\n"
+	__WEAK_LLSC_MB
 	: "=&r" (result), "=&r" (temp), "=m" (*pv)
 	: "m" (*pv)
 	: "memory");
@@ -829,6 +883,15 @@
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 			break;
 		}
 		break;
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	case IRQ_AFFINITY_IPI:
+		/*
+		 * Accept a "forwarded" interrupt that was initially
+		 * taken by a TC who doesn't have affinity for the IRQ.
+		 */
+		do_IRQ_no_affinity((int)arg_copy);
+		break;
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 	default:
 		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
 		break;
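The atomic_postincrement() hunk above drops the unconditional sync that
followed the ll/sc loop in favor of __WEAK_LLSC_MB, so the barrier is only paid
for on CPUs that can reorder memory operations beyond an ll/sc sequence.  A
paraphrased sketch of the barrier definition this relies on (the exact header
and spacing in the tree may differ):

/* Paraphrased sketch, not the literal kernel header: the barrier is a
 * real sync only when ordering is weak beyond ll/sc sequences. */
#ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
#define __WEAK_LLSC_MB		"	sync			\n"
#else
#define __WEAK_LLSC_MB		"				\n"
#endif

On cores with non-weak consistency the string concatenated into the asm
template is just whitespace, so the sync vanishes at compile time, which is the
microoptimization named in the commit title.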
diff --git a/arch/mips/mips-boards/malta/malta_smtc.c b/arch/mips/mips-boards/malta/malta_smtc.c
index 0fb4c26..b18ae60 100644
--- a/arch/mips/mips-boards/malta/malta_smtc.c
+++ b/arch/mips/mips-boards/malta/malta_smtc.c
@@ -1,6 +1,7 @@
 /*
  * Malta Platform-specific hooks for SMP operation
  */
+#include
 #include
 #include
 
@@ -86,3 +87,53 @@ void prom_smp_finish(void)
 void prom_cpus_done(void)
 {
 }
+
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * IRQ affinity hook
+ */
+
+void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+	cpumask_t tmask = affinity;
+	int cpu = 0;
+	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
+
+	/*
+	 * On the legacy Malta development board, all I/O interrupts
+	 * are routed through the 8259 and combined in a single signal
+	 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
+	 * that signal is brought to IP2 of both VPEs.  To avoid racing
+	 * concurrent interrupt service events, IP2 is enabled only on
+	 * one VPE, by convention VPE0.  So long as no bits are ever
+	 * cleared in the affinity mask, there will never be any
+	 * interrupt forwarding.  But as soon as a program or operator
+	 * sets affinity for one of the related IRQs, we need to make
+	 * sure that we don't ever try to forward across the VPE boundary,
+	 * at least not until we engineer a system where the interrupt
+	 * _ack() or _end() function can somehow know that it corresponds
+	 * to an interrupt taken on another VPE, and perform the appropriate
+	 * restoration of Status.IM state using MFTR/MTTR instead of the
+	 * normal local behavior.  We also ensure that no attempt will
+	 * be made to forward to an offline "CPU".
+	 */
+
+	for_each_cpu_mask(cpu, affinity) {
+		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
+			cpu_clear(cpu, tmask);
+	}
+	irq_desc[irq].affinity = tmask;
+
+	if (cpus_empty(tmask))
+		/*
+		 * We could restore a default mask here, but the
+		 * runtime code can anyway deal with the null set
+		 */
+		printk(KERN_WARNING
+		       "IRQ affinity leaves no legal CPU for IRQ %d\n", irq);
+
+	/* Do any generic SMTC IRQ affinity setup */
+	smtc_set_irq_affinity(irq, tmask);
+}
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 5e46979..e0ffbe9 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -1,7 +1,7 @@
 /*
  *  pmu.c, Power Management Unit routines for NEC VR4100 series.
  *
- *  Copyright (C) 2003-2005  Yoichi Yuasa
+ *  Copyright (C) 2003-2007  Yoichi Yuasa
 *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -22,11 +22,12 @@
 #include
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
 
+#include
 #include
 #include
@@ -44,6 +45,18 @@ static void __iomem *pmu_base;
 #define pmu_read(offset)		readw(pmu_base + (offset))
 #define pmu_write(offset, value)	writew((value), pmu_base + (offset))
 
+static void vr41xx_cpu_wait(void)
+{
+	local_irq_disable();
+	if (!need_resched())
+		/*
+		 * "standby" sets IE bit of the CP0_STATUS to 1.
+		 */
+		__asm__("standby;\n");
+	else
+		local_irq_enable();
+}
+
 static inline void software_reset(void)
 {
 	uint16_t pmucnt2;
@@ -113,6 +126,7 @@ static int __init vr41xx_pmu_init(void)
 		return -EBUSY;
 	}
 
+	cpu_wait = vr41xx_cpu_wait;
 	_machine_restart = vr41xx_restart;
 	_machine_halt = vr41xx_halt;
 	pm_power_off = vr41xx_power_off;
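cpu_wait is a function pointer consumed by the generic MIPS idle loop, so the
single assignment in vr41xx_pmu_init() is all it takes for idle to start
issuing "standby".  A standalone sketch of the hook pattern; the names here are
local stand-ins, not kernel symbols:

#include <stdio.h>

/* Platform-registered idle hook, mirroring the kernel's cpu_wait. */
static void (*cpu_wait_hook)(void);

static void vr41xx_cpu_wait_demo(void)
{
	/* The real routine disables interrupts, rechecks need_resched(),
	 * and executes "standby", which halts the pipeline with
	 * CP0_STATUS.IE set so an interrupt can still wake the core. */
	puts("standby");
}

int main(void)
{
	cpu_wait_hook = vr41xx_cpu_wait_demo;	/* as vr41xx_pmu_init() does */
	if (cpu_wait_hook)			/* as the idle loop does */
		(*cpu_wait_hook)();
	return 0;
}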
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 97102eb..d998620 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -24,6 +24,36 @@
 #else
 #define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
 #endif
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+#include
+
+extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity);
+extern void smtc_forward_irq(unsigned int irq);
+
+/*
+ * IRQ affinity hook invoked at the beginning of interrupt dispatch
+ * if option is enabled.
+ *
+ * Up through Linux 2.6.22 (at least) cpumask operations are very
+ * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
+ * used a "fast path" per-IRQ-descriptor cache of affinity information
+ * to reduce latency.  As there is a project afoot to optimize the
+ * cpumask implementations, this version is optimistically assuming
+ * that cpumask.h macro overhead is reasonable during interrupt dispatch.
+ */
+#define IRQ_AFFINITY_HOOK(irq)						\
+do {									\
+	if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {	\
+		smtc_forward_irq(irq);					\
+		irq_exit();						\
+		return;							\
+	}								\
+} while (0)
+
+#else /* Not doing SMTC affinity */
+
+#define IRQ_AFFINITY_HOOK(irq)						\
+do { } while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 /*
  * Clear interrupt mask handling "backstop" if irq_hwmask
@@ -33,13 +63,26 @@ #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
  */
 #define __DO_IRQ_SMTC_HOOK(irq)						\
 do {									\
+	IRQ_AFFINITY_HOOK(irq);						\
 	if (irq_hwmask[irq] & 0x0000ff00)				\
 		write_c0_tccontext(read_c0_tccontext() &		\
-		                   ~(irq_hwmask[irq] & 0x0000ff00));	\
+				   ~(irq_hwmask[irq] & 0x0000ff00));	\
+} while (0)
+
+#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq)				\
+do {									\
+	if (irq_hwmask[irq] & 0x0000ff00)				\
+		write_c0_tccontext(read_c0_tccontext() &		\
+				   ~(irq_hwmask[irq] & 0x0000ff00));	\
 } while (0)
 #else
-#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
-#endif
+#define __DO_IRQ_SMTC_HOOK(irq) do {					\
+	IRQ_AFFINITY_HOOK(irq);						\
+} while (0)
+#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do {				\
+} while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
 
 /*
  * do_IRQ handles all normal device IRQ's (the special
@@ -57,6 +100,23 @@
 do {									\
 	irq_exit();							\
 } while (0)
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * To avoid inefficient and in some cases pathological re-checking of
+ * IRQ affinity, we have this variant that skips the affinity check.
+ */
+
+#define do_IRQ_no_affinity(irq)						\
+do {									\
+	irq_enter();							\
+	__NO_AFFINITY_IRQ_SMTC_HOOK(irq);				\
+	generic_handle_irq(irq);					\
+	irq_exit();							\
+} while (0)
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 extern void arch_init_irq(void);
 extern void spurious_interrupt(void);
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index a52a4a7..e09131a 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -34,6 +34,7 @@ #endif /* SMTC_IPI_DEBUG */
 #define LINUX_SMP_IPI 1
 #define SMTC_CLOCK_TICK 2
+#define IRQ_AFFINITY_IPI 3
 
 /*
  * A queue of IPI messages
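The smtc_ipi.h hunk closes the loop: IRQ_AFFINITY_HOOK() notices the current TC
is not in the mask, smtc_forward_irq() picks the first eligible "CPU" and sends
an IRQ_AFFINITY_IPI, and the receiver's IPI decode calls do_IRQ_no_affinity().
A standalone sketch of that forwarding decision under an assumed mask; all
names and values are local stand-ins, not kernel symbols:

#include <stdio.h>

#define NR_CPUS 4

/* Affinity mask as maintained by plat_set_irq_affinity(): bit n = CPU n.
 * 0x1 models the Malta convention of servicing on VPE0/CPU0 only. */
static unsigned int affinity_mask = 0x1;

static int first_cpu_in(unsigned int mask)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;		/* empty mask: caller services locally */
}

static void dispatch(int irq, int this_cpu)
{
	if (!(affinity_mask & (1u << this_cpu))) {
		int target = first_cpu_in(affinity_mask);

		if (target >= NR_CPUS)	/* no eligible CPU at all */
			printf("IRQ %d: empty mask, servicing locally\n", irq);
		else			/* punt via IRQ_AFFINITY_IPI */
			printf("IRQ %d: forwarding to CPU %d\n", irq, target);
		return;
	}
	printf("IRQ %d: serviced directly on CPU %d\n", irq, this_cpu);
}

int main(void)
{
	dispatch(10, 1);	/* taken on CPU1, mask allows only CPU0 */
	dispatch(10, 0);	/* taken on CPU0, eligible */
	return 0;
}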