===== arch/ia64/sn/include/pci/pcidev.h 1.2 vs edited =====
--- 1.2/arch/ia64/sn/include/pci/pcidev.h	2004-10-20 13:38:34 -07:00
+++ edited/arch/ia64/sn/include/pci/pcidev.h	2005-02-10 14:58:19 -08:00
@@ -9,8 +9,10 @@
 #define _ASM_IA64_SN_PCI_PCIDEV_H
 
 #include
+#include <linux/spinlock.h>
 
 extern struct sn_irq_info **sn_irq;
+extern spinlock_t sn_irq_list_lock;
 
 #define SN_PCIDEV_INFO(pci_dev) \
 	((struct pcidev_info *)(pci_dev)->sysdata)
===== arch/ia64/sn/kernel/io_init.c 1.9 vs edited =====
--- 1.9/arch/ia64/sn/kernel/io_init.c	2005-01-11 16:22:08 -08:00
+++ edited/arch/ia64/sn/kernel/io_init.c	2005-02-10 14:49:56 -08:00
@@ -330,6 +330,7 @@
 {
 	int i = 0;
 	struct pci_dev *pci_dev = NULL;
+	unsigned long flags;
 	extern void sn_init_cpei_timer(void);
 #ifdef CONFIG_PROC_FS
 	extern void register_sn_procfs(void);
@@ -343,10 +344,12 @@
 	 */
 	ia64_max_iommu_merge_mask = ~PAGE_MASK;
 	sn_fixup_ionodes();
+	spin_lock_irqsave(&sn_irq_list_lock, flags);
 	sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL);
 	if (sn_irq <= 0)
 		BUG();	/* Canno afford to run out of memory. */
 	memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS);
+	spin_unlock_irqrestore(&sn_irq_list_lock, flags);
 
 	sn_init_cpei_timer();
===== arch/ia64/sn/kernel/irq.c 1.31 vs edited =====
--- 1.31/arch/ia64/sn/kernel/irq.c	2005-01-22 15:54:50 -08:00
+++ edited/arch/ia64/sn/kernel/irq.c	2005-02-10 14:59:22 -08:00
@@ -9,6 +9,7 @@
  */
 
 #include
+#include <linux/spinlock.h>
 #include
 #include
 #include
@@ -26,6 +27,7 @@
 extern int sn_force_interrupt_flag;
 extern int sn_ioif_inited;
 struct sn_irq_info **sn_irq;
+DEFINE_SPINLOCK(sn_irq_list_lock);
 
 static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
 				     u64 sn_irq_info,
@@ -128,16 +130,19 @@
 static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
 {
-	struct sn_irq_info *sn_irq_info = sn_irq[irq];
+	struct sn_irq_info *sn_irq_info;
 	struct sn_irq_info *tmp_sn_irq_info;
 	int cpuid, cpuphys;
 	nasid_t t_nasid;	/* nasid to target */
 	int t_slice;		/* slice to target */
+	unsigned long flags;
 
+	spin_lock_irqsave(&sn_irq_list_lock, flags);
+	sn_irq_info = sn_irq[irq];
 	/* allocate a temp sn_irq_info struct to get new target info */
 	tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
 	if (!tmp_sn_irq_info)
-		return;
+		goto out_unlock;
 
 	cpuid = first_cpu(mask);
 	cpuphys = cpu_physical_id(cpuid);
 
@@ -166,31 +171,32 @@
 				       __pa(tmp_sn_irq_info), irq, t_nasid, t_slice);
 
-		if (status == 0) {
-			/* Update kernels sn_irq_info with new target info */
-			unregister_intr_pda(sn_irq_info);
-			sn_irq_info->irq_cpuid = cpuid;
-			sn_irq_info->irq_nasid = t_nasid;
-			sn_irq_info->irq_slice = t_slice;
-			sn_irq_info->irq_xtalkaddr =
-			    tmp_sn_irq_info->irq_xtalkaddr;
-			sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
-			register_intr_pda(sn_irq_info);
+		/* SAL call failed */
+		if (status)
+			break;
+
+		/* Update kernels sn_irq_info with new target info */
+		unregister_intr_pda(sn_irq_info);
+		sn_irq_info->irq_cpuid = cpuid;
+		sn_irq_info->irq_nasid = t_nasid;
+		sn_irq_info->irq_slice = t_slice;
+		sn_irq_info->irq_xtalkaddr = tmp_sn_irq_info->irq_xtalkaddr;
+		sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
+		register_intr_pda(sn_irq_info);
 
-			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
-				pcibr_change_devices_irq(sn_irq_info);
-			}
+		if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
+			pcibr_change_devices_irq(sn_irq_info);
+		}
 
-			sn_irq_info = sn_irq_info->irq_next;
+		sn_irq_info = sn_irq_info->irq_next;
 
 #ifdef CONFIG_SMP
-			set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+		set_irq_affinity_info((irq & 0xff), cpuphys, 0);
 #endif
-		} else {
-			break;	/* snp_affinity failed the intr_alloc */
-		}
 	}
 
 	kfree(tmp_sn_irq_info);
+out_unlock:
+	spin_unlock_irqrestore(&sn_irq_list_lock, flags);
 }
 
 struct hw_interrupt_type irq_type_sn = {
@@ -235,6 +241,7 @@
 	}
 }
 
+/* Caller must hold sn_irq_list_lock */
 static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
 {
 	int irq = sn_irq_info->irq_irq;
@@ -324,13 +331,16 @@
 	nasid_t nasid = sn_irq_info->irq_nasid;
 	int slice = sn_irq_info->irq_slice;
 	int cpu = nasid_slice_to_cpuid(nasid, slice);
+	unsigned long flags;
 
 	sn_irq_info->irq_cpuid = cpu;
 	sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
 
+	spin_lock_irqsave(&sn_irq_list_lock, flags);
 	/* link it into the sn_irq[irq] list */
 	sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
 	sn_irq[sn_irq_info->irq_irq] = sn_irq_info;
+	spin_unlock_irqrestore(&sn_irq_list_lock, flags);
 
 	(void)register_intr_pda(sn_irq_info);
 }
@@ -338,9 +348,12 @@
 static void force_interrupt(int irq)
 {
 	struct sn_irq_info *sn_irq_info;
+	unsigned long flags;
 
 	if (!sn_ioif_inited)
 		return;
+
+	spin_lock_irqsave(&sn_irq_list_lock, flags);
 	sn_irq_info = sn_irq[irq];
 	while (sn_irq_info) {
 		if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
@@ -349,6 +362,7 @@
 		}
 		sn_irq_info = sn_irq_info->irq_next;
 	}
+	spin_unlock_irqrestore(&sn_irq_list_lock, flags);
 }
 
 /*
@@ -413,12 +427,15 @@
 void sn_lb_int_war_check(void)
 {
+	unsigned long flags;
 	int i;
 
 	if (!sn_ioif_inited || pda->sn_first_irq == 0)
 		return;
 
 	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
-		struct sn_irq_info *sn_irq_info = sn_irq[i];
+		struct sn_irq_info *sn_irq_info;
+		spin_lock_irqsave(&sn_irq_list_lock, flags);
+		sn_irq_info = sn_irq[i];
 		while (sn_irq_info) {
 			/* Only call for PCI bridges that are fully initialized. */
 			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
@@ -427,5 +444,6 @@
 			}
 			sn_irq_info = sn_irq_info->irq_next;
 		}
+		spin_unlock_irqrestore(&sn_irq_list_lock, flags);
 	}
 }
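
For readers unfamiliar with the pattern, here is a small standalone userspace sketch (not part of the patch) of what the change does: every path that reads or modifies one of the per-IRQ singly linked lists now takes a single global lock around the access. A pthread mutex stands in for spin_lock_irqsave()/spin_unlock_irqrestore(), and the struct and array names below are simplified stand-ins for sn_irq_info/sn_irq, not the real kernel definitions.

/*
 * Userspace illustration of the locking pattern introduced above:
 * one global lock protects an array of singly linked list heads.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_IRQS 16

struct irq_info {
	int cpuid;
	struct irq_info *next;
};

static struct irq_info *irq_list[NR_IRQS];
static pthread_mutex_t irq_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors sn_irq_fixup(): push a new entry on the head of one list. */
static void irq_list_add(int irq, int cpuid)
{
	struct irq_info *info = malloc(sizeof(*info));

	if (!info)
		abort();
	info->cpuid = cpuid;

	pthread_mutex_lock(&irq_list_lock);	/* spin_lock_irqsave() in the patch */
	info->next = irq_list[irq];
	irq_list[irq] = info;
	pthread_mutex_unlock(&irq_list_lock);	/* spin_unlock_irqrestore() */
}

/* Mirrors force_interrupt()/sn_lb_int_war_check(): walk one list under the lock. */
static void irq_list_walk(int irq)
{
	struct irq_info *info;

	pthread_mutex_lock(&irq_list_lock);
	for (info = irq_list[irq]; info; info = info->next)
		printf("irq %d -> cpu %d\n", irq, info->cpuid);
	pthread_mutex_unlock(&irq_list_lock);
}

int main(void)
{
	irq_list_add(3, 0);
	irq_list_add(3, 1);
	irq_list_walk(3);
	return 0;
}

The sketch uses one global lock, matching the patch; a per-IRQ lock would reduce contention but would complicate paths such as sn_set_affinity_irq() that walk an entire list while updating it.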