===== arch/ia64/kernel/mca.c 1.70 vs edited =====
--- 1.70/arch/ia64/kernel/mca.c	2004-10-20 11:27:10 -07:00
+++ edited/arch/ia64/kernel/mca.c	2004-11-22 15:19:47 -08:00
@@ -813,8 +813,10 @@
 	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
 		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
 
-	if (recover)
+	if (recover) {
 		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
+		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
+	}
 	else
 		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
 
@@ -870,21 +872,70 @@
 void
 ia64_mca_ucmc_handler(void)
 {
+	struct io_range *range;
+	unsigned long io_addr = 0;
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
 		&ia64_sal_to_os_handoff_state.proc_state_param;
-	int recover;
+	int recover = 0;
+	ia64_err_rec_t *curr_record;
 
 	/* Get the MCA error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
 
-	/* TLB error is only exist in this SAL error record */
-	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
-	/* other error recovery */
-	   || (ia64_mca_ucmc_extension
-		&& ia64_mca_ucmc_extension(
-			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
-			&ia64_sal_to_os_handoff_state,
-			&ia64_os_to_sal_handoff_state));
+	/* TLB errors are fixed up before we get here, so recover */
+	if (psp->tc) {
+		recover = 1;
+		goto return_to_sal;
+	}
+
+	/*
+	 * If it's not a bus check with a valid target identifier,
+	 * we don't have a chance.
+	 */
+	if (!psp->bc) {
+		recover = 0;
+		goto return_to_sal;
+	}
+
+	/*
+	 * If we can't get this lock, we can't safely look at the list,
+	 * so give up.
+	 */
+	if (!spin_trylock(&io_range_list_lock)) {
+		recover = 0;
+		goto return_to_sal;
+	}
+
+	curr_record = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
+	io_addr = curr_record->proc_err.info->target_identifier;
+
+	/*
+	 * See if an I/O error occurred in a previously registered range
+	 */
+	list_for_each_entry(range, &pci_io_ranges, range_list) {
+		if (range->start <= io_addr && io_addr <= range->end) {
+			struct siginfo siginfo;
+			struct task_struct *owner = NULL;
+			recover = 1;
+			siginfo.si_signo = SIGBUS;
+			siginfo.si_code = BUS_ADRERR;
+			siginfo.si_addr = (void *) io_addr;
+			owner = find_task_by_pid(range->owner);
+			if (owner)
+				force_sig_info(SIGBUS, &siginfo, owner);
+			else {
+				/*
+				 * need to free memory too, is that safe
+				 * here?
+				 */
+				list_del(&range->range_list);
+			}
+			break;
+		}
+	}
+	spin_unlock(&io_range_list_lock);
+
+ return_to_sal:
 
 	/*
 	 * Wakeup all the processors which are spinning in the rendezvous
===== arch/ia64/pci/pci.c 1.59 vs edited =====
--- 1.59/arch/ia64/pci/pci.c	2004-11-05 11:55:25 -08:00
+++ edited/arch/ia64/pci/pci.c	2004-11-22 11:53:04 -08:00
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -36,6 +37,8 @@
 #include
 #include
+#include "../sn/include/pci/pcidev.h"
+#include "../sn/include/pci/pcibus_provider_defs.h"
 
 #undef DEBUG
 #define DEBUG
 
@@ -48,6 +51,9 @@
 
 static int pci_routeirq;
 
+LIST_HEAD(pci_io_ranges);
+spinlock_t io_range_list_lock = SPIN_LOCK_UNLOCKED;
+
 /*
  * Low-level SAL-based PCI configuration access functions. Note that SAL
  * calls are already serialized (via sal_lock), so we don't need another
@@ -501,24 +507,36 @@
 pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
 {
-	/*
-	 * I/O space cannot be accessed via normal processor loads and
-	 * stores on this platform.
-	 */
-	if (mmap_state == pci_mmap_io)
-		/*
-		 * XXX we could relax this for I/O spaces for which ACPI
-		 * indicates that the space is 1-to-1 mapped. But at the
-		 * moment, we don't support multiple PCI address spaces and
-		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
-		 */
-		return -EINVAL;
+	struct io_range *new_range;
+	int ret = 0;
+
+	/* Remap legacy I/O space for this bus if the offset is < 64k */
+	if (mmap_state == pci_mmap_io &&
+	    (vma->vm_pgoff << PAGE_SHIFT) < (1<<16)) {
+		if (SN_PCIDEV_BUSSOFT(dev) == NULL) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		vma->vm_pgoff += SN_PCIDEV_BUSSOFT(dev)->bs_legacy_io >> PAGE_SHIFT;
+	}
+
+	/* Remap legacy mem space for this bus if the offset is < 1M */
+	if (mmap_state == pci_mmap_mem &&
+	    (vma->vm_pgoff << PAGE_SHIFT) < (1<<20)) {
+		if (SN_PCIDEV_BUSSOFT(dev) == NULL) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		vma->vm_pgoff += SN_PCIDEV_BUSSOFT(dev)->bs_legacy_mem >> PAGE_SHIFT;
+	}
 
 	/*
 	 * Leave vm_pgoff as-is, the PCI space address is the physical
 	 * address on this platform.
 	 */
-	vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
+	vma->vm_flags |= (VM_SHM | VM_IO | VM_RESERVED);
 
 	if (write_combine)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -526,10 +544,35 @@
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
-		return -EAGAIN;
+			    vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+		ret = -EAGAIN;
+		goto out;
+	}
 
-	return 0;
+	new_range = kmalloc(sizeof(struct io_range), GFP_KERNEL);
+	if (!new_range) {
+		printk(KERN_WARNING "%s: cannot allocate io_range, "
+			"I/O errors for 0x%016lx-0x%016lx will be fatal",
+			__FUNCTION__, vma->vm_start, vma->vm_end);
+		goto out;
+	}
+
+	/*
+	 * Track this range and its associated process for use by the
+	 * MCA handler.
+	 */
+	new_range->start = __pa(vma->vm_pgoff << PAGE_SHIFT);
+	new_range->end = new_range->start + (vma->vm_end - vma->vm_start);
+	new_range->owner = current->pid;
+
+	spin_lock(&io_range_list_lock);
+	list_add(&new_range->range_list, &pci_io_ranges);
+	spin_unlock(&io_range_list_lock);
+
+	printk("I/O range 0x%016lx-0x%016lx registered\n",
+		new_range->start, new_range->end);
+ out:
+	return ret;
 }
 
 /**
===== drivers/pci/proc.c 1.41 vs edited =====
--- 1.41/drivers/pci/proc.c	2004-10-06 09:44:51 -07:00
+++ edited/drivers/pci/proc.c	2004-11-15 10:09:45 -08:00
@@ -279,8 +279,22 @@
 
 static int proc_bus_pci_release(struct inode *inode, struct file *file)
 {
+	struct io_range *range;
+
 	kfree(file->private_data);
 	file->private_data = NULL;
+
+	spin_lock(&io_range_list_lock);
+	list_for_each_entry(range, &pci_io_ranges, range_list) {
+		if (range->owner == current->pid) {
+			list_del(&range->range_list);
+			printk("I/O range 0x%016lx-0x%016lx de-registered\n",
+				range->start, range->end);
+			kfree(range);
+			break;
+		}
+	}
+	spin_unlock(&io_range_list_lock);
 	return 0;
 }
 
===== include/asm-ia64/io.h 1.24 vs edited =====
--- 1.24/include/asm-ia64/io.h	2004-10-28 12:10:56 -07:00
+++ edited/include/asm-ia64/io.h	2004-11-15 10:18:24 -08:00
@@ -1,6 +1,8 @@
 #ifndef _ASM_IA64_IO_H
 #define _ASM_IA64_IO_H
 
+#include
+
 /*
  * This file contains the definitions for the emulated IO instructions
  * inb/inw/inl/outb/outw/outl and the "string versions" of the same
@@ -51,6 +53,17 @@
 extern struct io_space io_space[];
 extern unsigned int num_io_spaces;
 
+/*
+ * Simple I/O range object with owner (if there is one)
+ */
+struct io_range {
+	unsigned long start, end;
+	struct list_head range_list;
+	pid_t owner;
+};
+
+extern struct list_head pci_io_ranges;
+
 # ifdef __KERNEL__
 
 /*
@@ -66,11 +79,14 @@
 #define PIO_RESERVED		__IA64_UNCACHED_OFFSET
 #define HAVE_ARCH_PIO_SIZE
 
+#include
 #include
 #include
 #include
 #include
 #include
+
+extern spinlock_t io_range_list_lock;
 
 /*
  * Change virtual addresses to physical addresses and vv.
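
For reference, a minimal userspace sketch of how a process would exercise this path (not part of the patch; the /proc/bus/pci device path, the 4K mapping size, and the use of PCIIOC_MMAP_IS_MEM from <linux/pci.h> are illustrative assumptions). The mmap of the proc file goes through pci_mmap_page_range(), so the range and the caller's pid land on pci_io_ranges; if a later MCA reports a bus check whose target identifier falls inside that range, the handler above delivers SIGBUS/BUS_ADRERR to this process instead of forcing a cold boot.

/* Illustrative sketch only -- device path and mapping size are assumptions. */
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/pci.h>		/* PCIIOC_MMAP_IS_MEM */

static void bus_error(int sig, siginfo_t *si, void *ctx)
{
	/* si_addr carries the target identifier logged by the MCA handler */
	write(2, "caught SIGBUS for PCI mapping\n", 30);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;
	volatile unsigned int *mmio;
	int fd;

	sa.sa_sigaction = bus_error;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGBUS, &sa, NULL);

	fd = open("/proc/bus/pci/00/02.0", O_RDWR);	/* hypothetical device */
	if (fd < 0)
		return 1;
	ioctl(fd, PCIIOC_MMAP_IS_MEM);	/* map memory space, not I/O ports */

	mmio = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (mmio == MAP_FAILED)
		return 1;

	/* A load that takes an uncorrected bus error in this range now
	 * arrives here as SIGBUS instead of taking the machine down. */
	(void)mmio[0];
	return 0;
}

When the file descriptor is released, proc_bus_pci_release() in the patch walks pci_io_ranges and drops the entry owned by the exiting pid again.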