From: Ingo Molnar

Consolidation: remove the irq_affinity[NR_IRQS] array and move it into the
irq_desc[NR_IRQS].affinity field.

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
Signed-off-by: Andrew Morton
---

 arch/alpha/kernel/irq.c               |    2 +-
 arch/i386/kernel/irq.c                |    2 +-
 arch/ia64/kernel/irq.c                |    4 ++--
 arch/parisc/kernel/irq.c              |    8 ++++----
 arch/powerpc/kernel/irq.c             |    2 +-
 arch/powerpc/platforms/pseries/xics.c |    4 ++--
 arch/powerpc/sysdev/mpic.c            |    2 +-
 arch/ppc/syslib/open_pic.c            |    4 ++--
 arch/x86_64/kernel/irq.c              |    2 +-
 include/linux/irq.h                   |    7 ++++---
 kernel/irq/handle.c                   |    5 ++++-
 kernel/irq/manage.c                   |    2 --
 kernel/irq/proc.c                     |    4 ++--
 13 files changed, 25 insertions(+), 23 deletions(-)

diff -puN arch/alpha/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc arch/alpha/kernel/irq.c
--- a/arch/alpha/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/arch/alpha/kernel/irq.c
@@ -56,7 +56,7 @@ select_smp_affinity(unsigned int irq)
                 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
         last_cpu = cpu;
 
-        irq_affinity[irq] = cpumask_of_cpu(cpu);
+        irq_desc[irq].affinity = cpumask_of_cpu(cpu);
         irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu));
         return 0;
 }
diff -puN arch/i386/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc arch/i386/kernel/irq.c
--- a/arch/i386/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/arch/i386/kernel/irq.c
@@ -277,7 +277,7 @@ void fixup_irqs(cpumask_t map)
                 if (irq == 2)
                         continue;
 
-                cpus_and(mask, irq_affinity[irq], map);
+                cpus_and(mask, irq_desc[irq].affinity, map);
                 if (any_online_cpu(mask) == NR_CPUS) {
                         printk("Breaking affinity for irq %i\n", irq);
                         mask = map;
diff -puN arch/ia64/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc arch/ia64/kernel/irq.c
--- a/arch/ia64/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/arch/ia64/kernel/irq.c
@@ -100,7 +100,7 @@ void set_irq_affinity_info (unsigned int
         cpu_set(cpu_logical_id(hwid), mask);
 
         if (irq < NR_IRQS) {
-                irq_affinity[irq] = mask;
+                irq_desc[irq].affinity = mask;
                 irq_redir[irq] = (char) (redir & 0xff);
         }
 }
@@ -131,7 +131,7 @@ static void migrate_irqs(void)
                 if (desc->status == IRQ_PER_CPU)
                         continue;
 
-                cpus_and(mask, irq_affinity[irq], cpu_online_map);
+                cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
                 if (any_online_cpu(mask) == NR_CPUS) {
                         /*
                          * Save it for phase 2 processing
diff -puN arch/parisc/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc arch/parisc/kernel/irq.c
--- a/arch/parisc/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/arch/parisc/kernel/irq.c
@@ -94,7 +94,7 @@ int cpu_check_affinity(unsigned int irq,
         if (irq == TIMER_IRQ || irq == IPI_IRQ) {
                 /* Bad linux design decision.  The mask has already
                  * been set; we must reset it */
-                irq_affinity[irq] = CPU_MASK_ALL;
+                irq_desc[irq].affinity = CPU_MASK_ALL;
                 return -EINVAL;
         }
 
@@ -110,7 +110,7 @@ static void cpu_set_affinity_irq(unsigne
         if (cpu_check_affinity(irq, &dest))
                 return;
 
-        irq_affinity[irq] = dest;
+        irq_desc[irq].affinity = dest;
 }
 #endif
 
@@ -265,7 +265,7 @@ int txn_alloc_irq(unsigned int bits_wide
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-        irq_affinity[irq] = cpumask_of_cpu(cpu);
+        irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 #endif
 
         return cpu_data[cpu].txn_addr;
@@ -326,7 +326,7 @@ void do_cpu_irq_mask(struct pt_regs *reg
         /* Work our way from MSb to LSb...same order we alloc EIRs */
         for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
 #ifdef CONFIG_SMP
-                cpumask_t dest = irq_affinity[irq];
+                cpumask_t dest = irq_desc[irq].affinity;
 #endif
                 if (!(bit & eirr_val))
                         continue;
diff -puN arch/powerpc/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc arch/powerpc/kernel/irq.c
--- a/arch/powerpc/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/arch/powerpc/kernel/irq.c
@@ -164,7 +164,7 @@ void fixup_irqs(cpumask_t map)
                 if (irq_desc[irq].status & IRQ_PER_CPU)
                         continue;
 
-                cpus_and(mask, irq_affinity[irq], map);
+                cpus_and(mask, irq_desc[irq].affinity, map);
                 if (any_online_cpu(mask) == NR_CPUS) {
                         printk("Breaking affinity for irq %i\n", irq);
                         mask = map;
diff -puN arch/powerpc/platforms/pseries/xics.c~genirq-cleanup-merge-irq_affinity-into-irq_desc arch/powerpc/platforms/pseries/xics.c
--- a/arch/powerpc/platforms/pseries/xics.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/arch/powerpc/platforms/pseries/xics.c
@@ -238,7 +238,7 @@ static int get_irq_server(unsigned int i
 {
         unsigned int server;
         /* For the moment only implement delivery to all cpus or one cpu */
-        cpumask_t cpumask = irq_affinity[irq];
+        cpumask_t cpumask = irq_desc[irq].affinity;
         cpumask_t tmp = CPU_MASK_NONE;
 
         if (!distribute_irqs)
@@ -729,7 +729,7 @@ void xics_migrate_irqs_away(void)
 
                 /* Reset affinity to all cpus */
                 desc->chip->set_affinity(virq, CPU_MASK_ALL);
-                irq_affinity[virq] = CPU_MASK_ALL;
+                irq_desc[virq].affinity = CPU_MASK_ALL;
 unlock:
                 spin_unlock_irqrestore(&desc->lock, flags);
         }
diff -puN arch/powerpc/sysdev/mpic.c~genirq-cleanup-merge-irq_affinity-into-irq_desc arch/powerpc/sysdev/mpic.c
--- a/arch/powerpc/sysdev/mpic.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/arch/powerpc/sysdev/mpic.c
@@ -886,7 +886,7 @@ void mpic_setup_this_cpu(void)
         /* let the mpic know we want intrs. default affinity is 0xffffffff
          * until changed via /proc. That's how it's done on x86. If we want
          * it differently, then we should make sure we also change the default
-         * values of irq_affinity in irq.c.
+         * values of irq_desc[].affinity in irq.c.
          */
         if (distribute_irqs) {
                 for (i = 0; i < mpic->num_sources ; i++)
diff -puN arch/ppc/syslib/open_pic.c~genirq-cleanup-merge-irq_affinity-into-irq_desc arch/ppc/syslib/open_pic.c
--- a/arch/ppc/syslib/open_pic.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/arch/ppc/syslib/open_pic.c
@@ -615,8 +615,8 @@ void __devinit do_openpic_setup_cpu(void
         /* let the openpic know we want intrs. default affinity
          * is 0xffffffff until changed via /proc
          * That's how it's done on x86. If we want it differently, then
-         * we should make sure we also change the default values of irq_affinity
-         * in irq.c.
+         * we should make sure we also change the default values of
+         * irq_desc[].affinity in irq.c.
          */
         for (i = 0; i < NumSources; i++)
                 openpic_mapirq(i, msk, CPU_MASK_ALL);
diff -puN arch/x86_64/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc arch/x86_64/kernel/irq.c
--- a/arch/x86_64/kernel/irq.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/arch/x86_64/kernel/irq.c
@@ -114,7 +114,7 @@ void fixup_irqs(cpumask_t map)
                 if (irq == 2)
                         continue;
 
-                cpus_and(mask, irq_affinity[irq], map);
+                cpus_and(mask, irq_desc[irq].affinity, map);
                 if (any_online_cpu(mask) == NR_CPUS) {
                         printk("Breaking affinity for irq %i\n", irq);
                         mask = map;
diff -puN include/linux/irq.h~genirq-cleanup-merge-irq_affinity-into-irq_desc include/linux/irq.h
--- a/include/linux/irq.h~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/include/linux/irq.h
@@ -77,6 +77,9 @@ typedef struct irq_desc {
         unsigned int irq_count;         /* For detecting broken interrupts */
         unsigned int irqs_unhandled;
         spinlock_t lock;
+#ifdef CONFIG_SMP
+        cpumask_t affinity;
+#endif
 #if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
         unsigned int move_irq;          /* Flag need to re-target intr dest*/
 #endif
@@ -97,12 +100,10 @@ extern int setup_irq(unsigned int irq, s
 extern void terminate_irqs(void);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
-extern cpumask_t irq_affinity[NR_IRQS];
-
 #ifdef CONFIG_SMP
 static inline void set_native_irq_info(int irq, cpumask_t mask)
 {
-        irq_affinity[irq] = mask;
+        irq_desc[irq].affinity = mask;
 }
 #else
 static inline void set_native_irq_info(int irq, cpumask_t mask)
diff -puN kernel/irq/handle.c~genirq-cleanup-merge-irq_affinity-into-irq_desc kernel/irq/handle.c
--- a/kernel/irq/handle.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/kernel/irq/handle.c
@@ -32,7 +32,10 @@ irq_desc_t irq_desc[NR_IRQS] __cacheline
         [0 ... NR_IRQS-1] = {
                 .status = IRQ_DISABLED,
                 .chip = &no_irq_type,
-                .lock = SPIN_LOCK_UNLOCKED
+                .lock = SPIN_LOCK_UNLOCKED,
+#ifdef CONFIG_SMP
+                .affinity = CPU_MASK_ALL
+#endif
         }
 };
 
diff -puN kernel/irq/manage.c~genirq-cleanup-merge-irq_affinity-into-irq_desc kernel/irq/manage.c
--- a/kernel/irq/manage.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/kernel/irq/manage.c
@@ -16,8 +16,6 @@
 
 #ifdef CONFIG_SMP
 
-cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-
 #if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
 cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
 #endif
diff -puN kernel/irq/proc.c~genirq-cleanup-merge-irq_affinity-into-irq_desc kernel/irq/proc.c
--- a/kernel/irq/proc.c~genirq-cleanup-merge-irq_affinity-into-irq_desc
+++ a/kernel/irq/proc.c
@@ -36,7 +36,7 @@ void proc_set_irq_affinity(unsigned int
 void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
 {
         set_balance_irq_affinity(irq, mask_val);
-        irq_affinity[irq] = mask_val;
+        irq_desc[irq].affinity = mask_val;
         irq_desc[irq].chip->set_affinity(irq, mask_val);
 }
 #endif
@@ -44,7 +44,7 @@ void proc_set_irq_affinity(unsigned int
 static int irq_affinity_read_proc(char *page, char **start, off_t off,
                                   int count, int *eof, void *data)
 {
-        int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
+        int len = cpumask_scnprintf(page, count, irq_desc[(long)data].affinity);
 
         if (count - len < 2)
                 return -EINVAL;
_
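For readers following the conversion, here is a minimal userspace sketch of the
layout change. It is not part of the patch: cpumask_t is mocked as a plain
unsigned long, NR_IRQS is shrunk, and set_irq_affinity() is a hypothetical
stand-in for proc_set_irq_affinity(). The point is only that the separate
irq_affinity[] array becomes an .affinity member of each irq_desc[] entry, with
the CPU_MASK_ALL default moving from the array initializer in
kernel/irq/manage.c into the descriptor initializer in kernel/irq/handle.c.

#include <stdio.h>

/* Mock types: the real cpumask_t is a bitmap, modelled here as one word. */
#define NR_IRQS      16
#define CPU_MASK_ALL (~0UL)

typedef unsigned long cpumask_t;

/* After the patch, the affinity mask lives inside the per-IRQ descriptor. */
struct irq_desc {
        unsigned int status;
        cpumask_t affinity;     /* replaces the global irq_affinity[NR_IRQS] */
};

/* Default mirrors the initializer the patch adds in kernel/irq/handle.c. */
static struct irq_desc irq_desc[NR_IRQS] = {
        [0 ... NR_IRQS-1] = {
                .status   = 0,
                .affinity = CPU_MASK_ALL
        }
};

/* Hypothetical stand-in for proc_set_irq_affinity(): store the new mask. */
static void set_irq_affinity(unsigned int irq, cpumask_t mask)
{
        irq_desc[irq].affinity = mask;  /* was: irq_affinity[irq] = mask; */
}

int main(void)
{
        set_irq_affinity(3, 0x1UL);     /* pin mock IRQ 3 to CPU 0 */
        printf("irq 3 affinity: 0x%lx\n", irq_desc[3].affinity);
        printf("irq 4 affinity: 0x%lx\n", irq_desc[4].affinity);
        return 0;
}

The [0 ... NR_IRQS-1] range initializer is the same gcc extension the patch
uses in handle.c, so the per-descriptor default needs no separate setup loop.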