GIT 1c10bb0c5105edc410ca9ab754d543c20474fc72 git+ssh://master.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git#test

commit 1f4c5c1fe2a6a74271989ec079af11e2bb8e2826
Author: Andrew Morton
Date: Thu Jun 29 02:29:20 2006 -0700

    [IA64] git-ia64 versus genirq

    Fix the git-ia64 tree after genirq merge.

    Signed-off-by: Andrew Morton
    Signed-off-by: Tony Luck

commit 2ab561a116e16cdee3ae0e13d51910634c15aee9
Author: David Mosberger-Tang
Date: Wed Jun 21 11:19:22 2006 -0700

    [IA64] esi-support

    Add support for making ESI calls [1].  ESI stands for "Extensible SAL
    specification" and is basically a way of invoking firmware subroutines
    which are identified by a GUID.  I don't know whether ESI is used by
    vendors other than HP (if you do, please let me know) but as firmware
    "backdoors" go, this seems one of the cleaner methods, so it seems
    reasonable to support it, even though I'm not aware of any publicly
    documented ESI calls.

    I'd have liked to make the ESI module completely stand-alone, but
    unfortunately that is not easily (or not at all) possible because in
    order to make ESI calls in physical mode, a small stub similar to the
    EFI stub is needed in the kernel proper.  I did try to create a stub
    that would work at user level, but it quickly got ugly beyond
    recognition (e.g., the stub had to make assumptions about how the
    module-loader generated call-stubs work) and I didn't even get it to
    work (that's probably fixable, but I didn't bother because I concluded
    it was too ugly anyhow).

    While it's not terribly elegant to have kernel code which isn't
    actively used in the kernel proper, I think it might be worth making
    an exception here for two reasons: the code is trivially small (all
    that's really needed is esi_stub.S) and by including it in the normal
    kernel distro, it might encourage other OEMs to also use ESI, which I
    think would be far better than each inventing their own firmware
    "backdoor".

    The code was originally written by Alex.  I just massaged and packaged
    it a bit (and perhaps messed up some things along the way...).

    Changes since the first version of the patch that was posted to the
    mailing list:

      * Export ia64_esi_call and ia64_esi_call_phys() as GPL symbols.

      * Disallow building esi.c as a module for now.  Building as a module
        would currently lead to an unresolved reference to "sal_lock" on
        SMP kernels because that symbol doesn't get exported.

      * Export esi_call_phys() only if ESI is enabled.

      * Remove internal stuff from esi.h and add a "proc_type" argument to
        ia64_esi_call() such that serialization requirements can be
        expressed (ESI follows SAL here, where procedure calls may have to
        be serialized, are MP-safe, or MP-safe and reentrant).

    [1] h21007.www2.hp.com/dspp/tech/tech_TechDocumentDetailPage_IDX/1,1701,919,00.html

    Signed-off-by: David Mosberger
    Signed-off-by: Alex Williamson
    Signed-off-by: Tony Luck

commit beada884dd437b509c26b39f1a0b0c6b31e6f340
Author: Zou Nan hai
Date: Wed Jun 14 14:33:24 2006 -0700

    [IA64] Miscellaneous updates for kexec/kdump

    Signed-off-by: Zou Nan hai

commit 76d08bb3f09054edc45326ce5c698a3f6c45f5d0
Author: Tony Luck
Date: Mon Jun 5 13:54:14 2006 -0700

    [IA64] Add "model name" to /proc/cpuinfo

    The Linux ia64 port tried to decode the processor family number to
    something human-readable, but Intel brand names don't change
    synchronously with updates to the family number.  Adopt a more
    i386-like approach and just print the family number in decimal.
    Add a new field "model name" that uses PAL_BRAND_INFO to find the
    official name for the cpu, or on older systems, falls back to using
    the well-known codenames (Merced, McKinley, Madison).

    Signed-off-by: Tony Luck

commit b373e385743597f576b67c423807bbdfe3b862e7
Author: Khalid Aziz
Date: Tue May 9 15:53:22 2006 -0700

    [IA64] kexec for ia64

    Enable kexec for ia64.

    Signed-off-by: Khalid Aziz
    Signed-off-by: Nanhai Zou
    Signed-off-by: Tony Luck

 arch/ia64/Kconfig                  |   31 +++
 arch/ia64/hp/common/sba_iommu.c    |   22 ++
 arch/ia64/kernel/Makefile          |    6 +
 arch/ia64/kernel/crash.c           |  152 ++++++++++++++++
 arch/ia64/kernel/efi.c             |   17 +-
 arch/ia64/kernel/entry.S           |    2
 arch/ia64/kernel/esi.c             |  205 +++++++++++++++++++++
 arch/ia64/kernel/esi_stub.S        |   96 ++++++++++
 arch/ia64/kernel/ia64_ksyms.c      |    4
 arch/ia64/kernel/machine_kexec.c   |  115 ++++++++++++
 arch/ia64/kernel/relocate_kernel.S |  353 ++++++++++++++++++++++++++++++++++++
 arch/ia64/kernel/setup.c           |   79 +++++++-
 arch/ia64/kernel/smp.c             |   29 +++
 include/asm-ia64/esi.h             |   30 +++
 include/asm-ia64/kexec.h           |   34 +++
 include/asm-ia64/machvec_hpzx1.h   |    2
 include/asm-ia64/meminit.h         |    3
 include/asm-ia64/pal.h             |   10 +
 include/asm-ia64/processor.h       |    1
 include/asm-ia64/smp.h             |    3
 include/linux/irq.h                |    1
 kernel/irq/manage.c                |   12 +
 22 files changed, 1192 insertions(+), 15 deletions(-)

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index db274da..6e68e65 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -429,8 +429,39 @@ config IA64_PALINFO
 config SGI_SN
 	def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
 
+config IA64_ESI
+	bool "ESI (Extensible SAL Interface) support"
+	help
+	  If you say Y here, support is built into the kernel to
+	  make ESI calls.  ESI calls are used to support vendor-specific
+	  firmware extensions, such as the ability to inject memory-errors
+	  for test-purposes.  If you're unsure, say N.
+
 source "drivers/sn/Kconfig"
 
+config KEXEC
+	bool "kexec system call (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.  And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+	  The name comes from the similarity to the exec system call.
+
+	  It is an ongoing process to be certain the hardware in a machine
+	  is properly shutdown, so do not be surprised if this code does not
+	  initially work for you.  It may help to enable device hotplugging
+	  support.  As of this writing the exact hardware interface is
+	  strongly in flux, so no good recommendation can be made.
+
+config CRASH_DUMP
+	bool "kernel crash dumps (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  Generate crash dump after being started by kexec.
+ source "drivers/firmware/Kconfig" source "fs/Kconfig.binfmt" diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index db8e1fc..aa4ef60 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1623,6 +1623,28 @@ #endif READ_REG(ioc->ioc_hpa + IOC_IBASE); } +#ifdef CONFIG_KEXEC +void +ioc_iova_disable(void) +{ + struct ioc *ioc; + + ioc = ioc_list; + + while (ioc != NULL) { + /* Disable IOVA translation */ + WRITE_REG(ioc->ibase & 0xfffffffffffffffe, ioc->ioc_hpa + IOC_IBASE); + READ_REG(ioc->ioc_hpa + IOC_IBASE); + + /* Clear I/O TLB of any possible entries */ + WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM); + READ_REG(ioc->ioc_hpa + IOC_PCOM); + + ioc = ioc->next; + } +} +#endif + static void __init ioc_resource_init(struct ioc *ioc) { diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index ad8215a..96686f8 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -28,10 +28,16 @@ obj-$(CONFIG_IA64_CYCLONE) += cyclone.o obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o obj-$(CONFIG_AUDIT) += audit.o mca_recovery-y += mca_drv.o mca_drv_asm.o +obj-$(CONFIG_IA64_ESI) += esi.o +ifneq ($(CONFIG_IA64_ESI),) +obj-y += esi_stub.o # must be in kernel proper +endif + # The gate DSO image is built using a special linker script. targets += gate.so gate-syms.o diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c new file mode 100644 index 0000000..a1a192c --- /dev/null +++ b/arch/ia64/kernel/crash.c @@ -0,0 +1,152 @@ +/* + * arch/ia64/kernel/crash.c + * + * Architecture specific (ia64) functions for kexec based crash dumps. + * + * Created by: Khalid Aziz + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. 
+ * Copyright (C) 2005 Intel Corp Zou Nan hai + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +size_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + vaddr = page_address(pfn_to_page(pfn)); + + if (userbuf) { + if (copy_to_user(buf, (vaddr + offset), csize)) { + return -EFAULT; + } + } else + memcpy(buf, (vaddr + offset), csize); + return csize; +} + +static void device_shootdown(void) +{ + struct pci_dev *dev; + irq_desc_t *desc; + u16 pci_command; + + list_for_each_entry(dev, &pci_devices, global_list) { + desc = irq_desc + dev->irq; + if (!desc->action) + continue; + pci_read_config_word(dev, PCI_COMMAND, &pci_command); + if (pci_command & PCI_COMMAND_MASTER) { + pci_command &= ~PCI_COMMAND_MASTER; + pci_write_config_word(dev, PCI_COMMAND, pci_command); + } + disable_irq_nosync(dev->irq); + desc->chip->end(dev->irq); + } +} + +static Elf64_Word +*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data, + size_t data_len) +{ + struct elf_note *note = (struct elf_note *)buf; + note->n_namesz = strlen(name) + 1; + note->n_descsz = data_len; + note->n_type = type; + buf += (sizeof(*note) + 3)/4; + memcpy(buf, name, note->n_namesz); + buf += (note->n_namesz + 3)/4; + memcpy(buf, data, data_len); + buf += (data_len + 3)/4; + return buf; +} + +static void +final_note(void *buf) +{ + memset(buf, 0, sizeof(struct elf_note)); +} + +static void +crash_save_this_cpu(void) +{ + void *buf; + struct elf_prstatus prstatus; + int cpu = smp_processor_id(); + elf_greg_t *dst = (elf_greg_t *)&prstatus.pr_reg; + + memset(&prstatus, 0, sizeof(prstatus)); + prstatus.pr_pid = current->pid; + + dst[1] = ia64_getreg(_IA64_REG_GP); + dst[12] = ia64_getreg(_IA64_REG_SP); + dst[13] = ia64_getreg(_IA64_REG_TP); + + dst[42] = ia64_getreg(_IA64_REG_IP); + dst[45] = ia64_getreg(_IA64_REG_AR_RSC); + + ia64_setreg(_IA64_REG_AR_RSC, 0); + ia64_srlz_i(); + + dst[46] = ia64_getreg(_IA64_REG_AR_BSP); + dst[47] = ia64_getreg(_IA64_REG_AR_BSPSTORE); + + dst[48] = ia64_getreg(_IA64_REG_AR_RNAT); + dst[49] = ia64_getreg(_IA64_REG_AR_CCV); + dst[50] = ia64_getreg(_IA64_REG_AR_UNAT); + + dst[51] = ia64_getreg(_IA64_REG_AR_FPSR); + dst[52] = ia64_getreg(_IA64_REG_AR_PFS); + dst[53] = ia64_getreg(_IA64_REG_AR_LC); + + dst[54] = ia64_getreg(_IA64_REG_AR_LC); + dst[55] = ia64_getreg(_IA64_REG_AR_CSD); + dst[56] = ia64_getreg(_IA64_REG_AR_SSD); + + buf = (u64 *) per_cpu_ptr(crash_notes, cpu); + if (!buf) + return; + buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, + sizeof(prstatus)); + final_note(buf); +} + +void +machine_crash_shutdown(struct pt_regs *pt) +{ + /* This function is only called after the system + * has paniced or is otherwise in a critical state. + * The minimum amount of code to allow a kexec'd kernel + * to run successfully needs to happen here. + * + * In practice this means shooting down the other cpus in + * an SMP system. 
+ */ + if (in_interrupt()) { + ia64_eoi(); + } + crash_save_this_cpu(); + device_shootdown(); +#ifdef CONFIG_SMP + smp_send_stop(); +#endif +#ifdef CONFIG_IA64_HP_ZX1 + ioc_iova_disable(); +#endif +} diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index bb8770a..52729a9 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -26,6 +26,7 @@ #include #include #include #include +#include #include #include @@ -41,7 +42,7 @@ extern efi_status_t efi_call_phys (void struct efi efi; EXPORT_SYMBOL(efi); static efi_runtime_services_t *runtime; -static unsigned long mem_limit = ~0UL, max_addr = ~0UL; +static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL; #define efi_call_virt(f, args...) (*(f))(args) @@ -421,6 +422,8 @@ efi_init (void) mem_limit = memparse(cp + 4, &cp); } else if (memcmp(cp, "max_addr=", 9) == 0) { max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); + } else if (memcmp(cp, "min_addr=", 9) == 0) { + min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); } else { while (*cp != ' ' && *cp) ++cp; @@ -428,6 +431,8 @@ efi_init (void) ++cp; } } + if (min_addr != 0UL) + printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20); if (max_addr != ~0UL) printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20); @@ -894,7 +899,8 @@ find_memmap_space (void) as = max(contig_low, md->phys_addr); ae = min(contig_high, efi_md_end(md)); - /* keep within max_addr= command line arg */ + /* keep within max_addr= and min_addr= command line arg */ + as = max(as, min_addr); ae = min(ae, max_addr); if (ae <= as) continue; @@ -1004,7 +1010,8 @@ efi_memmap_init(unsigned long *s, unsign } else ae = efi_md_end(md); - /* keep within max_addr= command line arg */ + /* keep within max_addr= and min_addr= command line arg */ + as = max(as, min_addr); ae = min(ae, max_addr); if (ae <= as) continue; @@ -1116,6 +1123,10 @@ efi_initialize_iomem_resources(struct re */ insert_resource(res, code_resource); insert_resource(res, data_resource); +#ifdef CONFIG_KEXEC + if (crashk_res.end > crashk_res.start) + insert_resource(res, &crashk_res); +#endif } } } diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index fef0657..b6fde46 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1575,7 +1575,7 @@ sys_call_table: data8 sys_mq_timedreceive // 1265 data8 sys_mq_notify data8 sys_mq_getsetattr - data8 sys_ni_syscall // reserved for kexec_load + data8 sys_kexec_load data8 sys_ni_syscall // reserved for vserver data8 sys_waitid // 1270 data8 sys_add_key diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c new file mode 100644 index 0000000..ebf4e98 --- /dev/null +++ b/arch/ia64/kernel/esi.c @@ -0,0 +1,205 @@ +/* + * Extensible SAL Interface (ESI) support routines. 
+ * + * Copyright (C) 2006 Hewlett-Packard Co + * Alex Williamson + */ +#include +#include +#include +#include + +#include +#include + +MODULE_AUTHOR("Alex Williamson "); +MODULE_DESCRIPTION("Extensible SAL Interface (ESI) support"); +MODULE_LICENSE("GPL"); + +#define MODULE_NAME "esi" + +#define ESI_TABLE_GUID \ + EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3, \ + 0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4) + +enum esi_systab_entry_type { + ESI_DESC_ENTRY_POINT = 0 +}; + +/* + * Entry type: Size: + * 0 48 + */ +#define ESI_DESC_SIZE(type) "\060"[(unsigned) (type)] + +typedef struct ia64_esi_desc_entry_point { + u8 type; + u8 reserved1[15]; + u64 esi_proc; + u64 gp; + efi_guid_t guid; +} ia64_esi_desc_entry_point_t; + +struct pdesc { + void *addr; + void *gp; +}; + +static struct ia64_sal_systab *esi_systab; + +static int __init esi_init (void) +{ + efi_config_table_t *config_tables; + struct ia64_sal_systab *systab; + unsigned long esi = 0; + char *p; + int i; + + config_tables = __va(efi.systab->tables); + + for (i = 0; i < (int) efi.systab->nr_tables; ++i) { + if (efi_guidcmp(config_tables[i].guid, ESI_TABLE_GUID) == 0) { + esi = config_tables[i].table; + break; + } + } + + if (!esi) + return -ENODEV;; + + systab = __va(esi); + + if (strncmp(systab->signature, "ESIT", 4) != 0) { + printk(KERN_ERR "bad signature in ESI system table!"); + return -ENODEV; + } + + p = (char *) (systab + 1); + for (i = 0; i < systab->entry_count; i++) { + /* + * The first byte of each entry type contains the type + * descriptor. + */ + switch (*p) { + case ESI_DESC_ENTRY_POINT: + break; + default: + printk(KERN_WARNING "Unkown table type %d found in " + "ESI table, ignoring rest of table\n", *p); + return -ENODEV; + } + + p += ESI_DESC_SIZE(*p); + } + + esi_systab = systab; + return 0; +} + + +int ia64_esi_call (efi_guid_t guid, struct ia64_sal_retval *isrvp, + enum esi_proc_type proc_type, u64 func, + u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, + u64 arg7) +{ + struct ia64_fpreg fr[6]; + unsigned long flags = 0; + int i; + char *p; + + if (!esi_systab) + return -1; + + p = (char *) (esi_systab + 1); + for (i = 0; i < esi_systab->entry_count; i++) { + if (*p == ESI_DESC_ENTRY_POINT) { + ia64_esi_desc_entry_point_t *esi = (void *)p; + if (!efi_guidcmp(guid, esi->guid)) { + ia64_sal_handler esi_proc; + struct pdesc pdesc; + + pdesc.addr = __va(esi->esi_proc); + pdesc.gp = __va(esi->gp); + + esi_proc = (ia64_sal_handler) &pdesc; + + ia64_save_scratch_fpregs(fr); + if (proc_type == ESI_PROC_SERIALIZED) + spin_lock_irqsave(&sal_lock, flags); + else if (proc_type == ESI_PROC_MP_SAFE) + local_irq_save(flags); + else + preempt_disable(); + *isrvp = (*esi_proc)(func, arg1, arg2, arg3, + arg4, arg5, arg6, arg7); + if (proc_type == ESI_PROC_SERIALIZED) + spin_unlock_irqrestore(&sal_lock, + flags); + else if (proc_type == ESI_PROC_MP_SAFE) + local_irq_restore(flags); + else + preempt_enable(); + ia64_load_scratch_fpregs(fr); + return 0; + } + } + p += ESI_DESC_SIZE(*p); + } + return -1; +} +EXPORT_SYMBOL_GPL(ia64_esi_call); + +int ia64_esi_call_phys (efi_guid_t guid, struct ia64_sal_retval *isrvp, + u64 func, u64 arg1, u64 arg2, u64 arg3, u64 arg4, + u64 arg5, u64 arg6, u64 arg7) +{ + struct ia64_fpreg fr[6]; + unsigned long flags; + u64 esi_params[8]; + char *p; + int i; + + if (!esi_systab) + return -1; + + p = (char *) (esi_systab + 1); + for (i = 0; i < esi_systab->entry_count; i++) { + if (*p == ESI_DESC_ENTRY_POINT) { + ia64_esi_desc_entry_point_t *esi = (void *)p; + if (!efi_guidcmp(guid, esi->guid)) { 
+ ia64_sal_handler esi_proc; + struct pdesc pdesc; + + pdesc.addr = (void *)esi->esi_proc; + pdesc.gp = (void *)esi->gp; + + esi_proc = (ia64_sal_handler) &pdesc; + + esi_params[0] = func; + esi_params[1] = arg1; + esi_params[2] = arg2; + esi_params[3] = arg3; + esi_params[4] = arg4; + esi_params[5] = arg5; + esi_params[6] = arg6; + esi_params[7] = arg7; + ia64_save_scratch_fpregs(fr); + spin_lock_irqsave(&sal_lock, flags); + *isrvp = esi_call_phys(esi_proc, esi_params); + spin_unlock_irqrestore(&sal_lock, flags); + ia64_load_scratch_fpregs(fr); + return 0; + } + } + p += ESI_DESC_SIZE(*p); + } + return -1; +} +EXPORT_SYMBOL_GPL(ia64_esi_call_phys); + +static void __exit esi_exit (void) +{ +} + +module_init(esi_init); +module_exit(esi_exit); /* makes module removable... */ diff --git a/arch/ia64/kernel/esi_stub.S b/arch/ia64/kernel/esi_stub.S new file mode 100644 index 0000000..6b3d6c1 --- /dev/null +++ b/arch/ia64/kernel/esi_stub.S @@ -0,0 +1,96 @@ +/* + * ESI call stub. + * + * Copyright (C) 2005 Hewlett-Packard Co + * Alex Williamson + * + * Based on EFI call stub by David Mosberger. The stub is virtually + * identical to the one for EFI phys-mode calls, except that ESI + * calls may have up to 8 arguments, so they get passed to this routine + * through memory. + * + * This stub allows us to make ESI calls in physical mode with interrupts + * turned off. ESI calls may not support calling from virtual mode. + * + * Google for "Extensible SAL specification" for a document describing the + * ESI standard. + */ + +/* + * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System + * Abstraction Layer Specification", revision 2.6e). Note that + * psr.dfl and psr.dfh MUST be cleared, despite what this manual says. + * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call + * (the br.ia instruction fails unless psr.dfl and psr.dfh are + * cleared). Fortunately, SAL promises not to touch the floating + * point regs, so at least we don't have to save f2-f127. 
+ */ +#define PSR_BITS_TO_CLEAR \ + (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \ + IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \ + IA64_PSR_DFL | IA64_PSR_DFH) + +#define PSR_BITS_TO_SET \ + (IA64_PSR_BN) + +#include +#include + +/* + * Inputs: + * in0 = address of function descriptor of ESI routine to call + * in1 = address of array of ESI parameters + * + * Outputs: + * r8 = result returned by called function + */ +GLOBAL_ENTRY(esi_call_phys) + .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2) + alloc loc1=ar.pfs,2,7,8,0 + ld8 r2=[in0],8 // load ESI function's entry point + mov loc0=rp + .body + ;; + ld8 out0=[in1],8 // ESI params loaded from array + ;; // passing all as inputs doesn't work + ld8 out1=[in1],8 + ;; + ld8 out2=[in1],8 + ;; + ld8 out3=[in1],8 + ;; + ld8 out4=[in1],8 + ;; + ld8 out5=[in1],8 + ;; + ld8 out6=[in1],8 + ;; + ld8 out7=[in1] + mov loc2=gp // save global pointer + mov loc4=ar.rsc // save RSE configuration + mov ar.rsc=0 // put RSE in enforced lazy, LE mode + ;; + ld8 gp=[in0] // load ESI function's global pointer + movl r16=PSR_BITS_TO_CLEAR + mov loc3=psr // save processor status word + movl r17=PSR_BITS_TO_SET + ;; + or loc3=loc3,r17 + mov b6=r2 + ;; + andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared + br.call.sptk.many rp=ia64_switch_mode_phys +.ret0: mov loc5=r19 // old ar.bsp + mov loc6=r20 // old sp + br.call.sptk.many rp=b6 // call the ESI function +.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode + mov r16=loc3 // save virtual mode psr + mov r19=loc5 // save virtual mode bspstore + mov r20=loc6 // save virtual mode sp + br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode +.ret2: mov ar.rsc=loc4 // restore RSE configuration + mov ar.pfs=loc1 + mov rp=loc0 + mov gp=loc2 + br.ret.sptk.many rp +END(esi_call_phys) diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index 3ead20f..879c181 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -105,5 +105,9 @@ # endif # endif #endif +#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE) +extern void esi_call_phys (void); +EXPORT_SYMBOL_GPL(esi_call_phys); +#endif extern char ia64_ivt[]; EXPORT_SYMBOL(ia64_ivt); diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c new file mode 100644 index 0000000..573a6b8 --- /dev/null +++ b/arch/ia64/kernel/machine_kexec.c @@ -0,0 +1,115 @@ +/* + * arch/ia64/kernel/machine_kexec.c + * + * Handle transition of Linux booting another kernel + * Copyright (C) 2005 Hewlett-Packard Development Comapny, L.P. + * Copyright (C) 2005 Khalid Aziz + * Copyright (C) 2006 Intel Corp, Zou Nan hai + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long, + struct ia64_boot_param *, unsigned long); + +/* + * Do what every setup is needed on image and the + * reboot code buffer to allow us to avoid allocations + * later. 
+ */ +int machine_kexec_prepare(struct kimage *image) +{ + void *control_code_buffer; + const unsigned long *func; + + func = (unsigned long *)&relocate_new_kernel; + /* Pre-load control code buffer to minimize work in kexec path */ + control_code_buffer = page_address(image->control_code_page); + memcpy((void *)control_code_buffer, (const void *)func[0], + relocate_new_kernel_size); + flush_icache_range((unsigned long)control_code_buffer, + (unsigned long)control_code_buffer + relocate_new_kernel_size); + + return 0; +} + +void machine_kexec_cleanup(struct kimage *image) +{ +} + +void machine_shutdown(void) +{ +#ifdef CONFIG_PCI + struct pci_dev *dev = NULL; + irq_desc_t *idesc; + cpumask_t mask = CPU_MASK_NONE; + /* Disable all PCI devices */ + while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { + if (!(dev->is_enabled)) + continue; + idesc = irq_desc + dev->irq; + if (!idesc) + continue; + cpu_set(0, mask); + disable_irq_nosync(dev->irq); + idesc->chip->end(dev->irq); + idesc->chip->set_affinity(dev->irq, mask); + idesc->action = NULL; + pci_disable_device(dev); + } +#endif + +#ifdef CONFIG_HOTPLUG_CPU + { + int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id()) + cpu_down(cpu); + } + } +#elif defined(CONFIG_SMP) + smp_call_function(kexec_stop_this_cpu, (void *)image->start, 0, 0); +#endif + + +#ifdef CONFIG_IA64_HP_ZX1 + ioc_iova_disable(); +#endif +} + +/* + * Do not allocate memory (or fail in any way) in machine_kexec(). + * We are past the point of no return, committed to rebooting now. + */ +extern void *efi_get_pal_addr(void); +void machine_kexec(struct kimage *image) +{ + relocate_new_kernel_t rnk; + void *pal_addr = efi_get_pal_addr(); + unsigned long code_addr = (unsigned long)page_address(image->control_code_page); + /* Interrupts aren't acceptable while we reboot */ + ia64_set_itv(1<<16); + local_irq_disable(); + rnk = (relocate_new_kernel_t)&code_addr; + (*rnk)(image->head, image->start, ia64_boot_param, + GRANULEROUNDDOWN((unsigned long) pal_addr)); + BUG(); + for (;;); +} diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S new file mode 100644 index 0000000..09bd041 --- /dev/null +++ b/arch/ia64/kernel/relocate_kernel.S @@ -0,0 +1,353 @@ +/* + * arch/ia64/kernel/relocate_kernel.S + * + * Relocate kexec'able kernel and start it + * + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. + * Copyright (C) 2005 Khalid Aziz + * Copyright (C) 2005 Intel Corp, Zou Nan hai + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
+ */ +#include +#include +#include +#include +#include +#include + + /* Must be relocatable PIC code callable as a C function + */ +GLOBAL_ENTRY(relocate_new_kernel) + .prologue + alloc r31=ar.pfs,4,0,0,0 + .body +.reloc_entry: +{ + rsm psr.i| psr.ic + mov r2=ip +} + ;; +{ + flushrs // must be first insn in group + srlz.i +} + ;; + dep r2=0,r2,61,3 //to physical address + ;; + //first switch to physical mode + add r3=1f-.reloc_entry, r2 + movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC + mov ar.rsc=0 // put RSE in enforced lazy mode + ;; + add sp=(memory_stack_end - 16 - .reloc_entry),r2 + add r8=(register_stack - .reloc_entry),r2 + ;; + mov r18=ar.rnat + mov ar.bspstore=r8 + ;; + mov cr.ipsr=r16 + mov cr.iip=r3 + mov cr.ifs=r0 + srlz.i + ;; + mov ar.rnat=r18 + rfi + ;; +1: + //physical mode code begin + mov b6=in1 + dep r28=0,in2,61,3 //to physical address + + // purge all TC entries +#define O(member) IA64_CPUINFO_##member##_OFFSET + GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 + ;; + addl r17=O(PTCE_STRIDE),r2 + addl r2=O(PTCE_BASE),r2 + ;; + ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base + ld4 r19=[r2],4 // r19=ptce_count[0] + ld4 r21=[r17],4 // r21=ptce_stride[0] + ;; + ld4 r20=[r2] // r20=ptce_count[1] + ld4 r22=[r17] // r22=ptce_stride[1] + mov r24=r0 + ;; + adds r20=-1,r20 + ;; +#undef O +2: + cmp.ltu p6,p7=r24,r19 +(p7) br.cond.dpnt.few 4f + mov ar.lc=r20 +3: + ptc.e r18 + ;; + add r18=r22,r18 + br.cloop.sptk.few 3b + ;; + add r18=r21,r18 + add r24=1,r24 + ;; + br.sptk.few 2b +4: + srlz.i + ;; + //purge TR entry for kernel text and data + movl r16=KERNEL_START + mov r18=KERNEL_TR_PAGE_SHIFT<<2 + ;; + ptr.i r16, r18 + ptr.d r16, r18 + ;; + srlz.i + ;; + + // purge TR entry for percpu data + movl r16=PERCPU_ADDR + mov r18=PERCPU_PAGE_SHIFT<<2 + ;; + ptr.d r16,r18 + ;; + srlz.d + ;; + + // purge TR entry for pal code + mov r16=in3 + mov r18=IA64_GRANULE_SHIFT<<2 + ;; + ptr.i r16,r18 + ;; + srlz.i + ;; + + // purge TR entry for stack + mov r16=IA64_KR(CURRENT_STACK) + ;; + shl r16=r16,IA64_GRANULE_SHIFT + movl r19=PAGE_OFFSET + ;; + add r16=r19,r16 + mov r18=IA64_GRANULE_SHIFT<<2 + ;; + ptr.d r16,r18 + ;; + srlz.i + ;; + + //copy segments + movl r16=PAGE_MASK + mov r30=in0 // in0 is page_list + br.sptk.few .dest_page + ;; +.loop: + ld8 r30=[in0], 8;; +.dest_page: + tbit.z p0, p6=r30, 0;; // 0x1 dest page +(p6) and r17=r30, r16 +(p6) br.cond.sptk.few .loop;; + + tbit.z p0, p6=r30, 1;; // 0x2 indirect page +(p6) and in0=r30, r16 +(p6) br.cond.sptk.few .loop;; + + tbit.z p0, p6=r30, 2;; // 0x4 end flag +(p6) br.cond.sptk.few .end_loop;; + + tbit.z p6, p0=r30, 3;; // 0x8 source page +(p6) br.cond.sptk.few .loop + + and r18=r30, r16 + + // simple copy page, may optimize later + movl r14=PAGE_SIZE/8 - 1;; + mov ar.lc=r14;; +1: + ld8 r14=[r18], 8;; + st8 [r17]=r14, 8;; + fc.i r17 + br.ctop.sptk.few 1b + br.sptk.few .loop + ;; + +.end_loop: + sync.i // for fc.i + ;; + srlz.i + ;; + srlz.d + ;; + br.call.sptk.many b0=b6;; + +.align 32 +memory_stack: + .fill 8192, 1, 0 +memory_stack_end: +register_stack: + .fill 8192, 1, 0 +register_stack_end: +relocate_new_kernel_end: +END(relocate_new_kernel) + +GLOBAL_ENTRY(kexec_fake_sal_rendez) + .prologue + alloc r31=ar.pfs,3,0,0,0 + .body +.rendez_entry: + rsm psr.i | psr.ic + mov r25=ip + ;; + { + flushrs + srlz.i + } + ;; + /* See where I am running, and compute gp */ + { + mov ar.rsc = 0 /* Put RSE in enforce lacy, LE mode */ + mov gp = ip /* gp == relocate_new_kernel */ + } + + movl r8=0x00000100000000 + ;; + mov 
cr.iva=r8 + /* Transition from virtual to physical mode */ + srlz.i + ;; + add r17=5f-.rendez_entry, r25 + movl r16=(IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_IC | IA64_PSR_MFL) + ;; + tpa r17=r17 + mov cr.ipsr=r16 + ;; + mov cr.iip=r17 + mov cr.ifs=r0 + ;; + rfi + ;; +5: + mov b6=in0 /* _start addr */ + mov r8=in1 /* ap_wakeup_vector */ + mov r26=in2 /* PAL addr */ + ;; + /* Purge kernel TRs */ + movl r16=KERNEL_START + mov r18=KERNEL_TR_PAGE_SHIFT<<2 + ;; + ptr.i r16,r18 + ptr.d r16,r18 + ;; + srlz.i + ;; + srlz.d + ;; + /* Purge percpu TR */ + movl r16=PERCPU_ADDR + mov r18=PERCPU_PAGE_SHIFT<<2 + ;; + ptr.d r16,r18 + ;; + srlz.d + ;; + /* Purge PAL TR */ + mov r18=IA64_GRANULE_SHIFT<<2 + ;; + ptr.i r26,r18 + ;; + srlz.i + ;; + /* Purge stack TR */ + mov r16=IA64_KR(CURRENT_STACK) + ;; + shl r16=r16,IA64_GRANULE_SHIFT + movl r19=PAGE_OFFSET + ;; + add r16=r19,r16 + mov r18=IA64_GRANULE_SHIFT<<2 + ;; + ptr.d r16,r18 + ;; + srlz.i + ;; + + /* Ensure we can read and clear external interrupts */ + mov cr.tpr=r0 + srlz.d + + shr.u r9=r8,6 /* which irr */ + ;; + and r8=63,r8 /* bit offset into irr */ + ;; + mov r10=1;; + ;; + shl r10=r10,r8 /* bit mask off irr we want */ + cmp.eq p6,p0=0,r9 + ;; +(p6) br.cond.sptk.few check_irr0 + cmp.eq p7,p0=1,r9 + ;; +(p7) br.cond.sptk.few check_irr1 + cmp.eq p8,p0=2,r9 + ;; +(p8) br.cond.sptk.few check_irr2 + cmp.eq p9,p0=3,r9 + ;; +(p9) br.cond.sptk.few check_irr3 + +check_irr0: + mov r8=cr.irr0 + ;; + and r8=r8,r10 + ;; + cmp.eq p6,p0=0,r8 +(p6) br.cond.sptk.few check_irr0 + br.few call_start + +check_irr1: + mov r8=cr.irr1 + ;; + and r8=r8,r10 + ;; + cmp.eq p6,p0=0,r8 +(p6) br.cond.sptk.few check_irr1 + br.few call_start + +check_irr2: + mov r8=cr.irr2 + ;; + and r8=r8,r10 + ;; + cmp.eq p6,p0=0,r8 +(p6) br.cond.sptk.few check_irr2 + br.few call_start + +check_irr3: + mov r8=cr.irr3 + ;; + and r8=r8,r10 + ;; + cmp.eq p6,p0=0,r8 +(p6) br.cond.sptk.few check_irr3 + br.few call_start + +call_start: + mov cr.eoi=r0 + ;; + srlz.d + ;; + mov r8=cr.ivr + ;; + srlz.d + ;; + cmp.eq p0,p6=15,r8 +(p6) br.cond.sptk.few call_start + br.sptk.few b6 +kexec_fake_sal_rendez_end: +END(kexec_fake_sal_rendez) + + .global relocate_new_kernel_size +relocate_new_kernel_size: + data8 kexec_fake_sal_rendez_end - relocate_new_kernel + diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 7ad0d9c..74821be 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -43,6 +43,8 @@ #include #include #include #include +#include +#include #include #include @@ -250,6 +252,32 @@ #ifdef CONFIG_BLK_DEV_INITRD } #endif +#ifdef CONFIG_KEXEC + /* crashkernel=size@addr specifies the location to reserve for + * a crash kernel. By reserving this memory we guarantee + * that linux never set's it up as a DMA target. + * Useful for holding code to do something appropriate + * after a kernel panic. 
+ */ + { + char *from = strstr(saved_command_line, "crashkernel="); + if (from) { + unsigned long size, base; + size = memparse(from + 12, &from); + if (*from == '@') { + base = memparse(from + 1, &from); + rsvd_region[n].start = + (unsigned long)__va(base); + rsvd_region[n].end = + (unsigned long)__va(base + size); + crashk_res.start = base; + crashk_res.end = base + size - 1; + n++; + } + } + } +#endif + efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); n++; @@ -484,6 +512,16 @@ #endif if (!nomca) ia64_mca_init(); +#ifdef CONFIG_CRASH_DUMP + { + char *from = strstr(saved_command_line, "elfcorehdr="); + + if (from) + elfcorehdr_addr = memparse(from+11, &from); + saved_max_pfn = (unsigned long) -1; + } +#endif + platform_setup(cmdline_p); paging_init(); } @@ -509,7 +547,7 @@ #endif { 1UL << 1, "spontaneous deferral"}, { 1UL << 2, "16-byte atomic ops" } }; - char family[32], features[128], *cp, sep; + char features[128], *cp, sep; struct cpuinfo_ia64 *c = v; unsigned long mask; unsigned long proc_freq; @@ -517,12 +555,6 @@ #endif mask = c->features; - switch (c->family) { - case 0x07: memcpy(family, "Itanium", 8); break; - case 0x1f: memcpy(family, "Itanium 2", 10); break; - default: sprintf(family, "%u", c->family); break; - } - /* build the feature string: */ memcpy(features, " standard", 10); cp = features; @@ -553,8 +585,9 @@ #endif "processor : %d\n" "vendor : %s\n" "arch : IA-64\n" - "family : %s\n" + "family : %u\n" "model : %u\n" + "model name : %s\n" "revision : %u\n" "archrev : %u\n" "features :%s\n" /* don't change this---it _is_ right! */ @@ -563,7 +596,8 @@ #endif "cpu MHz : %lu.%06lu\n" "itc MHz : %lu.%06lu\n" "BogoMIPS : %lu.%02lu\n", - cpunum, c->vendor, family, c->model, c->revision, c->archrev, + cpunum, c->vendor, c->family, c->model, + c->model_name, c->revision, c->archrev, features, c->ppn, c->number, proc_freq / 1000, proc_freq % 1000, c->itc_freq / 1000000, c->itc_freq % 1000000, @@ -611,6 +645,31 @@ struct seq_operations cpuinfo_op = { .show = show_cpuinfo }; +static char brandname[128]; + +static char * __cpuinit +get_model_name(__u8 family, __u8 model) +{ + char brand[128]; + + if (ia64_pal_get_brand_info(brand)) { + if (family == 0x7) + memcpy(brand, "Merced", 7); + else if (family == 0x1f) switch (model) { + case 0: memcpy(brand, "McKinley", 9); break; + case 1: memcpy(brand, "Madison", 8); break; + case 2: memcpy(brand, "Madison up to 9M cache", 23); break; + } else + memcpy(brand, "Unknown", 8); + } + if (brandname[0] == '\0') + return strcpy(brandname, brand); + else if (strcmp(brandname, brand) == 0) + return brandname; + else + return kstrdup(brand, GFP_KERNEL); +} + static void __cpuinit identify_cpu (struct cpuinfo_ia64 *c) { @@ -640,7 +699,6 @@ identify_cpu (struct cpuinfo_ia64 *c) pal_status_t status; unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */ int i; - for (i = 0; i < 5; ++i) cpuid.bits[i] = ia64_get_cpuid(i); @@ -663,6 +721,7 @@ #endif c->family = cpuid.field.family; c->archrev = cpuid.field.archrev; c->features = cpuid.field.features; + c->model_name = get_model_name(c->family, c->model); status = ia64_pal_vm_summary(&vm1, &vm2); if (status == PAL_STATUS_SUCCESS) { diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 657ac99..6337278 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -30,6 +30,7 @@ #include #include #include #include +#include #include #include @@ -84,6 +85,34 @@ unlock_ipi_calllock(void) spin_unlock_irq(&call_lock); } +#ifdef CONFIG_KEXEC +/* + * Stop the CPU 
and put it in fake SAL rendezvous. This allows CPU to wake + * up with IPI from boot processor + */ +void +kexec_stop_this_cpu (void *func) +{ + unsigned long pta, impl_va_bits, pal_base; + + /* + * Remove this CPU by putting it into fake SAL rendezvous + */ + cpu_clear(smp_processor_id(), cpu_online_map); + max_xtp(); + ia64_eoi(); + + /* Disable VHPT */ + impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61))); + pta = POW2(61) - POW2(vmlpt_bits); + ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | 0); + + local_irq_disable(); + pal_base = __get_cpu_var(ia64_mca_pal_base); + kexec_fake_sal_rendez(func, ap_wakeup_vector, pal_base); +} +#endif + static void stop_this_cpu (void) { diff --git a/include/asm-ia64/esi.h b/include/asm-ia64/esi.h new file mode 100644 index 0000000..84aac0e --- /dev/null +++ b/include/asm-ia64/esi.h @@ -0,0 +1,30 @@ +/* + * ESI service calls. + * + * Copyright (c) Copyright 2005-2006 Hewlett-Packard Development Company, L.P. + * Alex Williamson + */ +#ifndef esi_h +#define esi_h + +#include + +#define ESI_QUERY 0x00000001 +#define ESI_OPEN_HANDLE 0x02000000 +#define ESI_CLOSE_HANDLE 0x02000001 + +enum esi_proc_type { + ESI_PROC_SERIALIZED, /* calls need to be serialized */ + ESI_PROC_MP_SAFE, /* MP-safe, but not reentrant */ + ESI_PROC_REENTRANT /* MP-safe and reentrant */ +}; + +extern int ia64_esi_init (void); +extern struct ia64_sal_retval esi_call_phys (void *, u64 *); +extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *, + enum esi_proc_type, + u64, u64, u64, u64, u64, u64, u64, u64); +extern int ia64_esi_call_phys(efi_guid_t, struct ia64_sal_retval *, u64, u64, + u64, u64, u64, u64, u64, u64); + +#endif /* esi_h */ diff --git a/include/asm-ia64/kexec.h b/include/asm-ia64/kexec.h new file mode 100644 index 0000000..d45c03f --- /dev/null +++ b/include/asm-ia64/kexec.h @@ -0,0 +1,34 @@ +#ifndef _ASM_IA64_KEXEC_H +#define _ASM_IA64_KEXEC_H + + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE + +#define KEXEC_CONTROL_CODE_SIZE (8192 + 8192 + 4096) + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_IA_64 + +#define MAX_NOTE_BYTES 1024 + +#define pte_bits 3 +#define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits) +#define POW2(n) (1ULL << (n)) + +DECLARE_PER_CPU(u64, ia64_mca_pal_base); +const extern unsigned int relocate_new_kernel_size; +volatile extern long kexec_rendez; +extern void relocate_new_kernel(unsigned long, unsigned long, + struct ia64_boot_param *, unsigned long); +extern void kexec_fake_sal_rendez(void *start, unsigned long wake_up, + unsigned long pal_base); +static inline void +crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) +{ +} +#endif /* _ASM_IA64_KEXEC_H */ diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h index e90daf9..2ae54c3 100644 --- a/include/asm-ia64/machvec_hpzx1.h +++ b/include/asm-ia64/machvec_hpzx1.h @@ -34,4 +34,6 @@ #define platform_dma_sync_sg_for_device #define platform_dma_supported sba_dma_supported #define platform_dma_mapping_error sba_dma_mapping_error +extern void ioc_iova_disable(void); + #endif /* _ASM_IA64_MACHVEC_HPZX1_h */ diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h index 6a33a07..22ad58c 100644 --- a/include/asm-ia64/meminit.h +++ 
b/include/asm-ia64/meminit.h @@ -15,11 +15,12 @@ #define meminit_h * - initrd (optional) * - command line string * - kernel code & data + * - crash dumping code reserved region * - Kernel memory map built from EFI memory map * * More could be added if necessary */ -#define IA64_MAX_RSVD_REGIONS 6 +#define IA64_MAX_RSVD_REGIONS 7 struct rsvd_region { unsigned long start; /* virtual address of beginning of element */ diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index 20a8d61..d1587e4 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h @@ -78,6 +78,7 @@ #define PAL_CACHE_WRITE 260 /* write ta #define PAL_VM_TR_READ 261 /* read contents of translation register */ #define PAL_GET_PSTATE 262 /* get the current P-state */ #define PAL_SET_PSTATE 263 /* set the P-state */ +#define PAL_BRAND_INFO 274 /* Processor branding information */ #ifndef __ASSEMBLY__ @@ -1133,6 +1134,15 @@ ia64_pal_set_pstate (u64 pstate_index) return iprv.status; } +/* Processor branding information*/ +static inline s64 +ia64_pal_get_brand_info (char *brand_info) +{ + struct ia64_pal_retval iprv; + PAL_CALL_STK(iprv, PAL_BRAND_INFO, 0, (u64)brand_info, 0); + return iprv.status; +} + /* Cause the processor to enter LIGHT HALT state, where prefetching and execution are * suspended, but cache and TLB coherency is maintained. */ diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index 265f482..ead68f8 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -163,6 +163,7 @@ #endif __u8 family; __u8 archrev; char vendor[16]; + char *model_name; #ifdef CONFIG_NUMA struct ia64_node_data *node_data; diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 719ff30..fe6954b 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h @@ -128,6 +128,9 @@ extern void smp_send_reschedule (int cpu extern void lock_ipi_calllock(void); extern void unlock_ipi_calllock(void); extern void identify_siblings (struct cpuinfo_ia64 *); +#ifdef CONFIG_KEXEC +extern void kexec_stop_this_cpu(void *); +#endif #else diff --git a/include/linux/irq.h b/include/linux/irq.h index fbf6d90..9b5349f 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -182,6 +182,7 @@ typedef struct irq_desc irq_desc_t; #include extern int setup_irq(unsigned int irq, struct irqaction *new); +extern void terminate_irqs(void); #ifdef CONFIG_GENERIC_HARDIRQS diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 92be519..e058bad 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -476,3 +476,15 @@ #endif } EXPORT_SYMBOL(request_irq); +/* + * Terminate any outstanding interrupts + */ +void terminate_irqs(void) +{ + irq_desc_t *desc = irq_desc; + int i; + + for (i = 0; i < NR_IRQS; i++, desc++) + if (desc->action && desc->chip->end) + desc->chip->end(i); +}
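
For reference, a kernel-side consumer of the new interface exported above
(ia64_esi_call() and the esi_proc_type enum from include/asm-ia64/esi.h)
would look roughly like the sketch below.  This is an illustration only and
not part of the patch: the GUID, the module/function names, and the use of
ESI_QUERY as the function code are hypothetical, since no ESI procedures are
publicly documented (as the esi-support commit message notes).

/*
 * Hypothetical example only: the GUID and the choice of ESI_QUERY as the
 * function code are illustrative, not taken from any real firmware.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/efi.h>
#include <asm/esi.h>
#include <asm/sal.h>

#define EXAMPLE_ESI_GUID \
	EFI_GUID(0x12345678, 0x9abc, 0xdef0, 0x12, 0x34, \
		 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0)

static int __init esi_example_init(void)
{
	struct ia64_sal_retval isrv;

	/*
	 * ESI_PROC_MP_SAFE: the procedure needs no serialization against
	 * other SAL/ESI calls, so ia64_esi_call() only disables local
	 * interrupts around it.  ia64_esi_call() returns 0 if an entry
	 * point matching the GUID was found and called, -1 otherwise.
	 */
	if (ia64_esi_call(EXAMPLE_ESI_GUID, &isrv, ESI_PROC_MP_SAFE,
			  ESI_QUERY, 0, 0, 0, 0, 0, 0, 0))
		return -ENODEV;

	printk(KERN_INFO "ESI query: status=%ld v0=%lx\n",
	       isrv.status, isrv.v0);
	return 0;
}

static void __exit esi_example_exit(void)
{
}

module_init(esi_example_init);
module_exit(esi_example_exit);
MODULE_LICENSE("GPL");	/* ia64_esi_call is exported GPL-only */

Because esi.c itself cannot be built as a module yet (the sal_lock export
issue mentioned in the commit message), such a consumer assumes CONFIG_IA64_ESI=y
in the running kernel.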