Subject: [PATCH] [acpi driver model] Move CPU driver Signed-off-by: Patrick Mochel --- drivers/acpi/Makefile | 6 drivers/acpi/drivers/legacy/Makefile | 9 drivers/acpi/drivers/legacy/processor_core.c | 986 ++++++++++++++++++ drivers/acpi/drivers/legacy/processor_idle.c | 1119 ++++++++++++++++++++ drivers/acpi/drivers/legacy/processor_perflib.c | 624 +++++++++++ drivers/acpi/drivers/legacy/processor_thermal.c | 399 +++++++ drivers/acpi/drivers/legacy/processor_throttling.c | 342 ++++++ drivers/acpi/processor_core.c | 986 ------------------ drivers/acpi/processor_idle.c | 1119 -------------------- drivers/acpi/processor_perflib.c | 624 ----------- drivers/acpi/processor_thermal.c | 399 ------- drivers/acpi/processor_throttling.c | 342 ------ 12 files changed, 3479 insertions(+), 3476 deletions(-) create mode 100644 drivers/acpi/drivers/legacy/processor_core.c create mode 100644 drivers/acpi/drivers/legacy/processor_idle.c create mode 100644 drivers/acpi/drivers/legacy/processor_perflib.c create mode 100644 drivers/acpi/drivers/legacy/processor_thermal.c create mode 100644 drivers/acpi/drivers/legacy/processor_throttling.c delete mode 100644 drivers/acpi/processor_core.c delete mode 100644 drivers/acpi/processor_idle.c delete mode 100644 drivers/acpi/processor_perflib.c delete mode 100644 drivers/acpi/processor_thermal.c delete mode 100644 drivers/acpi/processor_throttling.c applies-to: 518c29fe007dbc7398cefd86a4bf92f45bcbe213 f9fbc66e2e13343f1347f5adc998d6a16ae6fae3 diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 50256b7..261db1f 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -29,18 +29,12 @@ obj-y += osl.o utils.o \ # # ACPI Bus and Device Drivers # -processor-objs += processor_core.o processor_throttling.o \ - processor_idle.o processor_thermal.o -ifdef CONFIG_CPU_FREQ -processor-objs += processor_perflib.o -endif obj-y += sleep/ obj-y += bus.o glue.o obj-y += drivers/ obj-$(CONFIG_ACPI_VIDEO) += video.o obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o -obj-$(CONFIG_ACPI_PROCESSOR) += processor.o obj-$(CONFIG_ACPI_THERMAL) += thermal.o obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o obj-$(CONFIG_ACPI_DEBUG) += debug.o diff --git a/drivers/acpi/drivers/legacy/Makefile b/drivers/acpi/drivers/legacy/Makefile index a8158ae..d9c4280 100644 --- a/drivers/acpi/drivers/legacy/Makefile +++ b/drivers/acpi/drivers/legacy/Makefile @@ -8,3 +8,12 @@ obj-$(CONFIG_ACPI_FAN) += fan.o obj-$(CONFIG_ACPI_HOTKEY) += hotkey.o obj-$(CONFIG_ACPI_NUMA) += numa.o obj-$(CONFIG_ACPI_POWER) += power.o + + +processor-objs += processor_core.o processor_throttling.o \ + processor_idle.o processor_thermal.o +ifdef CONFIG_CPU_FREQ +processor-objs += processor_perflib.o +endif + +obj-$(CONFIG_ACPI_PROCESSOR) += processor.o diff --git a/drivers/acpi/drivers/legacy/processor_core.c b/drivers/acpi/drivers/legacy/processor_core.c new file mode 100644 index 0000000..0c561c5 --- /dev/null +++ b/drivers/acpi/drivers/legacy/processor_core.c @@ -0,0 +1,986 @@ +/* + * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2004 Dominik Brodowski + * Copyright (C) 2004 Anil S Keshavamurthy + * - Added processor hotplug support + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; 
either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * TBD:
+ *	1. Make # power states dynamic.
+ *	2. Support duty_cycle values that span bit 4.
+ *	3. Optimize by having scheduler determine busyness instead of
+ *	   having us try to calculate it here.
+ *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/dmi.h>
+#include <linux/moduleparam.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/cpu.h>
+#include <asm/delay.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/smp.h>
+#include <asm/acpi.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/processor.h>
+
+#define ACPI_PROCESSOR_COMPONENT	0x01000000
+#define ACPI_PROCESSOR_CLASS		"processor"
+#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
+#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
+#define ACPI_PROCESSOR_FILE_INFO	"info"
+#define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
+#define ACPI_PROCESSOR_FILE_LIMIT	"limit"
+#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
+#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
+
+#define ACPI_PROCESSOR_LIMIT_USER	0
+#define ACPI_PROCESSOR_LIMIT_THERMAL	1
+
+#define ACPI_STA_PRESENT		0x00000001
+
+#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("acpi_processor")
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION(ACPI_PROCESSOR_DRIVER_NAME);
+MODULE_LICENSE("GPL");
+
+static int acpi_processor_add(struct acpi_device *device);
+static int acpi_processor_start(struct acpi_device *device);
+static int acpi_processor_remove(struct acpi_device *device, int type);
+static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
+static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
+static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
+static int acpi_processor_handle_eject(struct acpi_processor *pr);
+
+static struct acpi_driver acpi_processor_driver = {
+	.name = ACPI_PROCESSOR_DRIVER_NAME,
+	.class = ACPI_PROCESSOR_CLASS,
+	.ids = ACPI_PROCESSOR_HID,
+	.ops = {
+		.add = acpi_processor_add,
+		.remove = acpi_processor_remove,
+		.start = acpi_processor_start,
+		},
+};
+
+#define INSTALL_NOTIFY_HANDLER		1
+#define UNINSTALL_NOTIFY_HANDLER	2
+
+static struct file_operations acpi_processor_info_fops = {
+	.open = acpi_processor_info_open_fs,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+struct acpi_processor *processors[NR_CPUS];
+struct acpi_processor_errata errata;
+
+/* --------------------------------------------------------------------------
+                                Errata Handling
+   -------------------------------------------------------------------------- */
+
+static int acpi_processor_errata_piix4(struct pci_dev *dev)
+{
+	u8 rev = 0;
+	u8 value1 = 0;
+	u8 value2 = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_errata_piix4");
+
+	if (!dev)
+		return_VALUE(-EINVAL);
+
+	/*
+	 * Note that 'dev' references the PIIX4 ACPI Controller.
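+	 * (the chip's power-management function, matched via
+	 * PCI_DEVICE_ID_INTEL_82371AB_3 in acpi_processor_errata() below);
+	 * the IDE and ISA functions consulted here are looked up separately
+	 * with pci_get_subsys() and released again with pci_dev_put().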
+	 */
+
+	pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
+
+	switch (rev) {
+	case 0:
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
+		break;
+	case 1:
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
+		break;
+	case 2:
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
+		break;
+	case 3:
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
+		break;
+	default:
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
+		break;
+	}
+
+	switch (rev) {
+
+	case 0:		/* PIIX4 A-step */
+	case 1:		/* PIIX4 B-step */
+		/*
+		 * See specification changes #13 ("Manual Throttle Duty Cycle")
+		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
+		 * erratum #5 ("STPCLK# Deassertion Time") from the January
+		 * 2002 PIIX4 specification update.  Applies only to older
+		 * PIIX4 models.
+		 */
+		errata.piix4.throttle = 1;
+		/* fall through: erratum #18 below applies to these too */
+
+	case 2:		/* PIIX4E */
+	case 3:		/* PIIX4M */
+		/*
+		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
+		 * Livelock") from the January 2002 PIIX4 specification update.
+		 * Applies to all PIIX4 models.
+		 */
+
+		/*
+		 * BM-IDE
+		 * ------
+		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
+		 * Status register address.  We'll use this later to read
+		 * each IDE controller's DMA status to make sure we catch all
+		 * DMA activity.
+		 */
+		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+				     PCI_DEVICE_ID_INTEL_82371AB,
+				     PCI_ANY_ID, PCI_ANY_ID, NULL);
+		if (dev) {
+			errata.piix4.bmisx = pci_resource_start(dev, 4);
+			pci_dev_put(dev);
+		}
+
+		/*
+		 * Type-F DMA
+		 * ----------
+		 * Find the PIIX4 ISA Controller and read the Motherboard
+		 * DMA controller's status to see if Type-F (Fast) DMA mode
+		 * is enabled (bit 7) on either channel.  Note that we'll
+		 * disable C3 support if this is enabled, as some legacy
+		 * devices won't operate well if fast DMA is disabled.
+		 */
+		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+				     PCI_DEVICE_ID_INTEL_82371AB_0,
+				     PCI_ANY_ID, PCI_ANY_ID, NULL);
+		if (dev) {
+			pci_read_config_byte(dev, 0x76, &value1);
+			pci_read_config_byte(dev, 0x77, &value2);
+			if ((value1 & 0x80) || (value2 & 0x80))
+				errata.piix4.fdma = 1;
+			pci_dev_put(dev);
+		}
+
+		break;
+	}
+
+	if (errata.piix4.bmisx)
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
+	if (errata.piix4.fdma)
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Type-F DMA livelock erratum (C3 disabled)\n"));
+
+	return_VALUE(0);
+}
+
+static int acpi_processor_errata(struct acpi_processor *pr)
+{
+	int result = 0;
+	struct pci_dev *dev = NULL;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_errata");
+
+	if (!pr)
+		return_VALUE(-EINVAL);
+
+	/*
+	 * PIIX4
+	 */
+	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
+			     PCI_ANY_ID, NULL);
+	if (dev) {
+		result = acpi_processor_errata_piix4(dev);
+		pci_dev_put(dev);
+	}
+
+	return_VALUE(result);
+}
+
+/* --------------------------------------------------------------------------
+                         Common ACPI processor functions
+   -------------------------------------------------------------------------- */
+
+/*
+ * _PDC is required for a BIOS-OS handshake for most of the newer
+ * ACPI processor features.
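+ *
+ * As a concrete sketch of the default handshake built by
+ * acpi_processor_set_pdc() below, arg0 is a single 12-byte buffer of
+ * three DWORDs:
+ *
+ *	arg0_buf[0] = ACPI_PDC_REVISION_ID;	-- interface revision
+ *	arg0_buf[1] = 0;			-- count of capability DWORDs
+ *	arg0_buf[2] = 0;			-- capability bits (none)
+ *
+ * Callers that need more (e.g. C-state capabilities) pass their own
+ * object list via 'pdc_in' instead.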
+ */ + +int acpi_processor_set_pdc(struct acpi_processor *pr, + struct acpi_object_list *pdc_in) +{ + acpi_status status = AE_OK; + u32 arg0_buf[3]; + union acpi_object arg0 = { ACPI_TYPE_BUFFER }; + struct acpi_object_list no_object = { 1, &arg0 }; + struct acpi_object_list *pdc; + + ACPI_FUNCTION_TRACE("acpi_processor_set_pdc"); + + arg0.buffer.length = 12; + arg0.buffer.pointer = (u8 *) arg0_buf; + arg0_buf[0] = ACPI_PDC_REVISION_ID; + arg0_buf[1] = 0; + arg0_buf[2] = 0; + + pdc = (pdc_in) ? pdc_in : &no_object; + + status = acpi_evaluate_object(pr->handle, "_PDC", pdc, NULL); + + if ((ACPI_FAILURE(status)) && (pdc_in)) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Error evaluating _PDC, using legacy perf. control...\n")); + + return_VALUE(status); +} + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +static struct proc_dir_entry *acpi_processor_dir = NULL; + +static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset) +{ + struct acpi_processor *pr = (struct acpi_processor *)seq->private; + + ACPI_FUNCTION_TRACE("acpi_processor_info_seq_show"); + + if (!pr) + goto end; + + seq_printf(seq, "processor id: %d\n" + "acpi id: %d\n" + "bus mastering control: %s\n" + "power management: %s\n" + "throttling control: %s\n" + "limit interface: %s\n", + pr->id, + pr->acpi_id, + pr->flags.bm_control ? "yes" : "no", + pr->flags.power ? "yes" : "no", + pr->flags.throttling ? "yes" : "no", + pr->flags.limit ? "yes" : "no"); + + end: + return_VALUE(0); +} + +static int acpi_processor_info_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, acpi_processor_info_seq_show, + PDE(inode)->data); +} + +static int acpi_processor_add_fs(struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + + ACPI_FUNCTION_TRACE("acpi_processor_add_fs"); + + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_processor_dir); + if (!acpi_device_dir(device)) + return_VALUE(-ENODEV); + } + acpi_device_dir(device)->owner = THIS_MODULE; + + /* 'info' [R] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_INFO)); + else { + entry->proc_fops = &acpi_processor_info_fops; + entry->data = acpi_driver_data(device); + entry->owner = THIS_MODULE; + } + + /* 'throttling' [R/W] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, + S_IFREG | S_IRUGO | S_IWUSR, + acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_THROTTLING)); + else { + entry->proc_fops = &acpi_processor_throttling_fops; + entry->proc_fops->write = acpi_processor_write_throttling; + entry->data = acpi_driver_data(device); + entry->owner = THIS_MODULE; + } + + /* 'limit' [R/W] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, + S_IFREG | S_IRUGO | S_IWUSR, + acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_LIMIT)); + else { + entry->proc_fops = &acpi_processor_limit_fops; + entry->proc_fops->write = acpi_processor_write_limit; + entry->data = acpi_driver_data(device); + entry->owner = THIS_MODULE; + } + + return_VALUE(0); +} + +static int acpi_processor_remove_fs(struct acpi_device *device) +{ + 
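+	/*
+	 * Tear down the 'info', 'throttling' and 'limit' entries created
+	 * by acpi_processor_add_fs(), then remove the device directory
+	 * itself from /proc.
+	 */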
ACPI_FUNCTION_TRACE("acpi_processor_remove_fs"); + + if (acpi_device_dir(device)) { + remove_proc_entry(ACPI_PROCESSOR_FILE_INFO, + acpi_device_dir(device)); + remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, + acpi_device_dir(device)); + remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, + acpi_device_dir(device)); + remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); + acpi_device_dir(device) = NULL; + } + + return_VALUE(0); +} + +/* Use the acpiid in MADT to map cpus in case of SMP */ +#ifndef CONFIG_SMP +#define convert_acpiid_to_cpu(acpi_id) (0xff) +#else + +#ifdef CONFIG_IA64 +#define arch_acpiid_to_apicid ia64_acpiid_to_sapicid +#define arch_cpu_to_apicid ia64_cpu_to_sapicid +#define ARCH_BAD_APICID (0xffff) +#else +#define arch_acpiid_to_apicid x86_acpiid_to_apicid +#define arch_cpu_to_apicid x86_cpu_to_apicid +#define ARCH_BAD_APICID (0xff) +#endif + +static u8 convert_acpiid_to_cpu(u8 acpi_id) +{ + u16 apic_id; + int i; + + apic_id = arch_acpiid_to_apicid[acpi_id]; + if (apic_id == ARCH_BAD_APICID) + return -1; + + for (i = 0; i < NR_CPUS; i++) { + if (arch_cpu_to_apicid[i] == apic_id) + return i; + } + return -1; +} +#endif + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ + +static int acpi_processor_get_info(struct acpi_processor *pr) +{ + acpi_status status = 0; + union acpi_object object = { 0 }; + struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; + u8 cpu_index; + static int cpu0_initialized; + + ACPI_FUNCTION_TRACE("acpi_processor_get_info"); + + if (!pr) + return_VALUE(-EINVAL); + + if (num_online_cpus() > 1) + errata.smp = TRUE; + + acpi_processor_errata(pr); + + /* + * Check to see if we have bus mastering arbitration control. This + * is required for proper C3 usage (to maintain cache coherency). + */ + if (acpi_fadt.V1_pm2_cnt_blk && acpi_fadt.pm2_cnt_len) { + pr->flags.bm_control = 1; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Bus mastering arbitration control present\n")); + } else + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "No bus mastering arbitration control\n")); + + /* + * Evalute the processor object. Note that it is common on SMP to + * have the first (boot) processor with a valid PBLK address while + * all others have a NULL address. + */ + status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error evaluating processor object\n")); + return_VALUE(-ENODEV); + } + + /* + * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. + * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c + */ + pr->acpi_id = object.processor.proc_id; + + cpu_index = convert_acpiid_to_cpu(pr->acpi_id); + + /* Handle UP system running SMP kernel, with no LAPIC in MADT */ + if (!cpu0_initialized && (cpu_index == 0xff) && + (num_online_cpus() == 1)) { + cpu_index = 0; + } + + cpu0_initialized = 1; + + pr->id = cpu_index; + + /* + * Extra Processor objects may be enumerated on MP systems with + * less than the max # of CPUs. They should be ignored _iff + * they are physically not present. 
+ */ + if (cpu_index >= NR_CPUS) { + if (ACPI_FAILURE + (acpi_processor_hotadd_init(pr->handle, &pr->id))) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error getting cpuindex for acpiid 0x%x\n", + pr->acpi_id)); + return_VALUE(-ENODEV); + } + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, + pr->acpi_id)); + + if (!object.processor.pblk_address) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n")); + else if (object.processor.pblk_length != 6) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid PBLK length [%d]\n", + object.processor.pblk_length)); + else { + pr->throttling.address = object.processor.pblk_address; + pr->throttling.duty_offset = acpi_fadt.duty_offset; + pr->throttling.duty_width = acpi_fadt.duty_width; + + pr->pblk = object.processor.pblk_address; + + /* + * We don't care about error returns - we just try to mark + * these reserved so that nobody else is confused into thinking + * that this region might be unused.. + * + * (In particular, allocating the IO range for Cardbus) + */ + request_region(pr->throttling.address, 6, "ACPI CPU throttle"); + } + +#ifdef CONFIG_CPU_FREQ + acpi_processor_ppc_has_changed(pr); +#endif + acpi_processor_get_throttling_info(pr); + acpi_processor_get_limit_info(pr); + + return_VALUE(0); +} + +static void *processor_device_array[NR_CPUS]; + +static int acpi_processor_start(struct acpi_device *device) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_processor *pr; + + ACPI_FUNCTION_TRACE("acpi_processor_start"); + + pr = acpi_driver_data(device); + + result = acpi_processor_get_info(pr); + if (result) { + /* Processor is physically not present */ + return_VALUE(0); + } + + BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0)); + + /* + * Buggy BIOS check + * ACPI id of processors can be reported wrongly by the BIOS. 
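+	 * (e.g. two Processor objects mapping to one logical CPU, which
+	 * the processor_device_array[] check below would catch).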
+ * Don't trust it blindly + */ + if (processor_device_array[pr->id] != NULL && + processor_device_array[pr->id] != (void *)device) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "BIOS reporting wrong ACPI id" + "for the processor\n")); + return_VALUE(-ENODEV); + } + processor_device_array[pr->id] = (void *)device; + + processors[pr->id] = pr; + + result = acpi_processor_add_fs(device); + if (result) + goto end; + + status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, + acpi_processor_notify, pr); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error installing device notify handler\n")); + } + + acpi_processor_power_init(pr, device); + + if (pr->flags.throttling) { + printk(KERN_INFO PREFIX "%s [%s] (supports", + acpi_device_name(device), acpi_device_bid(device)); + printk(" %d throttling states", pr->throttling.state_count); + printk(")\n"); + } + + end: + + return_VALUE(result); +} + +static void acpi_processor_notify(acpi_handle handle, u32 event, void *data) +{ + struct acpi_processor *pr = (struct acpi_processor *)data; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_processor_notify"); + + if (!pr) + return_VOID; + + if (acpi_bus_get_device(pr->handle, &device)) + return_VOID; + + switch (event) { + case ACPI_PROCESSOR_NOTIFY_PERFORMANCE: + acpi_processor_ppc_has_changed(pr); + acpi_bus_generate_event(device, event, + pr->performance_platform_limit); + break; + case ACPI_PROCESSOR_NOTIFY_POWER: + acpi_processor_cst_has_changed(pr); + acpi_bus_generate_event(device, event, 0); + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Unsupported event [0x%x]\n", event)); + break; + } + + return_VOID; +} + +static int acpi_processor_add(struct acpi_device *device) +{ + struct acpi_processor *pr = NULL; + + ACPI_FUNCTION_TRACE("acpi_processor_add"); + + if (!device) + return_VALUE(-EINVAL); + + pr = kmalloc(sizeof(struct acpi_processor), GFP_KERNEL); + if (!pr) + return_VALUE(-ENOMEM); + memset(pr, 0, sizeof(struct acpi_processor)); + + pr->handle = device->handle; + strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); + strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); + acpi_driver_data(device) = pr; + + return_VALUE(0); +} + +static int acpi_processor_remove(struct acpi_device *device, int type) +{ + acpi_status status = AE_OK; + struct acpi_processor *pr = NULL; + + ACPI_FUNCTION_TRACE("acpi_processor_remove"); + + if (!device || !acpi_driver_data(device)) + return_VALUE(-EINVAL); + + pr = (struct acpi_processor *)acpi_driver_data(device); + + if (pr->id >= NR_CPUS) { + kfree(pr); + return_VALUE(0); + } + + if (type == ACPI_BUS_REMOVAL_EJECT) { + if (acpi_processor_handle_eject(pr)) + return_VALUE(-EINVAL); + } + + acpi_processor_power_exit(pr, device); + + status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, + acpi_processor_notify); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error removing notify handler\n")); + } + + acpi_processor_remove_fs(device); + + processors[pr->id] = NULL; + + kfree(pr); + + return_VALUE(0); +} + +#ifdef CONFIG_ACPI_HOTPLUG_CPU +/**************************************************************************** + * Acpi processor hotplug support * + ****************************************************************************/ + +static int is_processor_present(acpi_handle handle); + +static int is_processor_present(acpi_handle handle) +{ + acpi_status status; + unsigned long sta = 0; + + ACPI_FUNCTION_TRACE("is_processor_present"); + + status = 
acpi_evaluate_integer(handle, "_STA", NULL, &sta); + if (ACPI_FAILURE(status) || !(sta & ACPI_STA_PRESENT)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Processor Device is not present\n")); + return_VALUE(0); + } + return_VALUE(1); +} + +static +int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device) +{ + acpi_handle phandle; + struct acpi_device *pdev; + struct acpi_processor *pr; + + ACPI_FUNCTION_TRACE("acpi_processor_device_add"); + + if (acpi_get_parent(handle, &phandle)) { + return_VALUE(-ENODEV); + } + + if (acpi_bus_get_device(phandle, &pdev)) { + return_VALUE(-ENODEV); + } + + if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) { + return_VALUE(-ENODEV); + } + + acpi_bus_start(*device); + + pr = acpi_driver_data(*device); + if (!pr) + return_VALUE(-ENODEV); + + if ((pr->id >= 0) && (pr->id < NR_CPUS)) { + kobject_hotplug(&(*device)->kobj, KOBJ_ONLINE); + } + return_VALUE(0); +} + +static void +acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data) +{ + struct acpi_processor *pr; + struct acpi_device *device = NULL; + int result; + + ACPI_FUNCTION_TRACE("acpi_processor_hotplug_notify"); + + switch (event) { + case ACPI_NOTIFY_BUS_CHECK: + case ACPI_NOTIFY_DEVICE_CHECK: + printk("Processor driver received %s event\n", + (event == ACPI_NOTIFY_BUS_CHECK) ? + "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); + + if (!is_processor_present(handle)) + break; + + if (acpi_bus_get_device(handle, &device)) { + result = acpi_processor_device_add(handle, &device); + if (result) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to add the device\n")); + break; + } + + pr = acpi_driver_data(device); + if (!pr) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Driver data is NULL\n")); + break; + } + + if (pr->id >= 0 && (pr->id < NR_CPUS)) { + kobject_hotplug(&device->kobj, KOBJ_OFFLINE); + break; + } + + result = acpi_processor_start(device); + if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) { + kobject_hotplug(&device->kobj, KOBJ_ONLINE); + } else { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Device [%s] failed to start\n", + acpi_device_bid(device))); + } + break; + case ACPI_NOTIFY_EJECT_REQUEST: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "received ACPI_NOTIFY_EJECT_REQUEST\n")); + + if (acpi_bus_get_device(handle, &device)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Device don't exist, dropping EJECT\n")); + break; + } + pr = acpi_driver_data(device); + if (!pr) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Driver data is NULL, dropping EJECT\n")); + return_VOID; + } + + if ((pr->id < NR_CPUS) && (cpu_present(pr->id))) + kobject_hotplug(&device->kobj, KOBJ_OFFLINE); + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Unsupported event [0x%x]\n", event)); + break; + } + + return_VOID; +} + +static acpi_status +processor_walk_namespace_cb(acpi_handle handle, + u32 lvl, void *context, void **rv) +{ + acpi_status status; + int *action = context; + acpi_object_type type = 0; + + status = acpi_get_type(handle, &type); + if (ACPI_FAILURE(status)) + return (AE_OK); + + if (type != ACPI_TYPE_PROCESSOR) + return (AE_OK); + + switch (*action) { + case INSTALL_NOTIFY_HANDLER: + acpi_install_notify_handler(handle, + ACPI_SYSTEM_NOTIFY, + acpi_processor_hotplug_notify, + NULL); + break; + case UNINSTALL_NOTIFY_HANDLER: + acpi_remove_notify_handler(handle, + ACPI_SYSTEM_NOTIFY, + acpi_processor_hotplug_notify); + break; + default: + break; + } + + return (AE_OK); +} + +static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) +{ + 
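+	/*
+	 * Hot-add sequence: confirm the processor is physically present
+	 * (_STA), map its local (S)APIC to a logical CPU id, then register
+	 * the CPU with the architecture code, unmapping again on failure.
+	 */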
ACPI_FUNCTION_TRACE("acpi_processor_hotadd_init"); + + if (!is_processor_present(handle)) { + return_VALUE(AE_ERROR); + } + + if (acpi_map_lsapic(handle, p_cpu)) + return_VALUE(AE_ERROR); + + if (arch_register_cpu(*p_cpu)) { + acpi_unmap_lsapic(*p_cpu); + return_VALUE(AE_ERROR); + } + + return_VALUE(AE_OK); +} + +static int acpi_processor_handle_eject(struct acpi_processor *pr) +{ + if (cpu_online(pr->id)) { + return (-EINVAL); + } + arch_unregister_cpu(pr->id); + acpi_unmap_lsapic(pr->id); + return (0); +} +#else +static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) +{ + return AE_ERROR; +} +static int acpi_processor_handle_eject(struct acpi_processor *pr) +{ + return (-EINVAL); +} +#endif + +static +void acpi_processor_install_hotplug_notify(void) +{ +#ifdef CONFIG_ACPI_HOTPLUG_CPU + int action = INSTALL_NOTIFY_HANDLER; + acpi_walk_namespace(ACPI_TYPE_PROCESSOR, + ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, + processor_walk_namespace_cb, &action, NULL); +#endif +} + +static +void acpi_processor_uninstall_hotplug_notify(void) +{ +#ifdef CONFIG_ACPI_HOTPLUG_CPU + int action = UNINSTALL_NOTIFY_HANDLER; + acpi_walk_namespace(ACPI_TYPE_PROCESSOR, + ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, + processor_walk_namespace_cb, &action, NULL); +#endif +} + +/* + * We keep the driver loaded even when ACPI is not running. + * This is needed for the powernow-k8 driver, that works even without + * ACPI, but needs symbols from this driver + */ + +static int __init acpi_processor_init(void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_init"); + + memset(&processors, 0, sizeof(processors)); + memset(&errata, 0, sizeof(errata)); + + acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); + if (!acpi_processor_dir) + return_VALUE(0); + acpi_processor_dir->owner = THIS_MODULE; + + result = acpi_bus_register_driver(&acpi_processor_driver); + if (result < 0) { + remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); + return_VALUE(0); + } + + acpi_processor_install_hotplug_notify(); + + acpi_thermal_cpufreq_init(); + + acpi_processor_ppc_init(); + + return_VALUE(0); +} + +static void __exit acpi_processor_exit(void) +{ + ACPI_FUNCTION_TRACE("acpi_processor_exit"); + + acpi_processor_ppc_exit(); + + acpi_thermal_cpufreq_exit(); + + acpi_processor_uninstall_hotplug_notify(); + + acpi_bus_unregister_driver(&acpi_processor_driver); + + remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); + + return_VOID; +} + +module_init(acpi_processor_init); +module_exit(acpi_processor_exit); + +EXPORT_SYMBOL(acpi_processor_set_thermal_limit); + +MODULE_ALIAS("processor"); diff --git a/drivers/acpi/drivers/legacy/processor_idle.c b/drivers/acpi/drivers/legacy/processor_idle.c new file mode 100644 index 0000000..807b0df --- /dev/null +++ b/drivers/acpi/drivers/legacy/processor_idle.c @@ -0,0 +1,1119 @@ +/* + * processor_idle - idle state submodule to the ACPI processor driver + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2004 Dominik Brodowski + * Copyright (C) 2004 Anil S Keshavamurthy + * - Added processor hotplug support + * Copyright (C) 2005 Venkatesh Pallipadi + * - Added support for C3 on SMP + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any 
later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>	/* need_resched() */
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/processor.h>
+
+#define ACPI_PROCESSOR_COMPONENT	0x01000000
+#define ACPI_PROCESSOR_CLASS		"processor"
+#define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
+#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("acpi_processor")
+#define ACPI_PROCESSOR_FILE_POWER	"power"
+#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
+#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
+#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
+
+static void (*pm_idle_save) (void);
+module_param(max_cstate, uint, 0644);
+
+static unsigned int nocst = 0;
+module_param(nocst, uint, 0000);
+
+/*
+ * bm_history -- bit-mask with a bit per jiffy of bus-master activity
+ * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
+ *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
+ *  100 HZ: 0x0000000F:  4 jiffies = 40ms
+ * reduce history for more aggressive entry into C3
+ */
+static unsigned int bm_history =
+    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
+module_param(bm_history, uint, 0644);
+
+/* --------------------------------------------------------------------------
+                                Power Management
+   -------------------------------------------------------------------------- */
+
+/*
+ * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
+ * For now disable this.  Probably a bug somewhere else.
+ *
+ * To skip this limit, boot/load with a large max_cstate limit.
+ */
+static int set_max_cstate(struct dmi_system_id *id)
+{
+	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
+		return 0;
+
+	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
+	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
+	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
+
+	max_cstate = (long)id->driver_data;
+
+	return 0;
+}
+
+static struct dmi_system_id __initdata processor_power_dmi_table[] = {
+	{set_max_cstate, "IBM ThinkPad R40e", {
+	 DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
+	 DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW")},
+	 (void *)1},
+	{set_max_cstate, "Medion 41700", {
+	 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+	 DMI_MATCH(DMI_BIOS_VERSION, "R01-A1J")},
+	 (void *)1},
+	{set_max_cstate, "Clevo 5600D", {
+	 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+	 DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
+	 (void *)2},
+	{},
+};
+
+static inline u32 ticks_elapsed(u32 t1, u32 t2)
+{
+	if (t2 >= t1)
+		return (t2 - t1);
+	else if (!acpi_fadt.tmr_val_ext)
+		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+	else
+		return ((0xFFFFFFFF - t1) + t2);
+}
+
+static void
+acpi_processor_power_activate(struct acpi_processor *pr,
+			      struct acpi_processor_cx *new)
+{
+	struct acpi_processor_cx *old;
+
+	if (!pr || !new)
+		return;
+
+	old = pr->power.state;
+
+	if (old)
+		old->promotion.count = 0;
+	new->demotion.count = 0;
+
+	/* Cleanup from old state.
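+	 * In practice that means clearing bus-master reload (BM_RLD) when
+	 * leaving C3, so bus-master requests stop triggering C3 exits.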
	 */
+	if (old) {
+		switch (old->type) {
+		case ACPI_STATE_C3:
+			/* Disable bus master reload */
+			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
+				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
+						  ACPI_MTX_DO_NOT_LOCK);
+			break;
+		}
+	}
+
+	/* Prepare to use new state. */
+	switch (new->type) {
+	case ACPI_STATE_C3:
+		/* Enable bus master reload */
+		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
+			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1,
+					  ACPI_MTX_DO_NOT_LOCK);
+		break;
+	}
+
+	pr->power.state = new;
+
+	return;
+}
+
+static void acpi_safe_halt(void)
+{
+	clear_thread_flag(TIF_POLLING_NRFLAG);
+	smp_mb__after_clear_bit();
+	if (!need_resched())
+		safe_halt();
+	set_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static atomic_t c3_cpu_count;
+
+static void acpi_processor_idle(void)
+{
+	struct acpi_processor *pr = NULL;
+	struct acpi_processor_cx *cx = NULL;
+	struct acpi_processor_cx *next_state = NULL;
+	int sleep_ticks = 0;
+	u32 t1, t2 = 0;
+
+	pr = processors[smp_processor_id()];
+	if (!pr)
+		return;
+
+	/*
+	 * Interrupts must be disabled during bus mastering calculations and
+	 * for C2/C3 transitions.
+	 */
+	local_irq_disable();
+
+	/*
+	 * Check whether we truly need to go idle, or should
+	 * reschedule:
+	 */
+	if (unlikely(need_resched())) {
+		local_irq_enable();
+		return;
+	}
+
+	cx = pr->power.state;
+	if (!cx) {
+		if (pm_idle_save)
+			pm_idle_save();
+		else
+			acpi_safe_halt();
+		return;
+	}
+
+	/*
+	 * Check BM Activity
+	 * -----------------
+	 * Check for bus mastering activity (if required), record, and check
+	 * for demotion.
+	 */
+	if (pr->flags.bm_check) {
+		u32 bm_status = 0;
+		unsigned long diff = jiffies - pr->power.bm_check_timestamp;
+
+		if (diff > 32)
+			diff = 32;
+
+		while (diff) {
+			/* if we didn't get called, assume there was busmaster activity */
+			diff--;
+			if (diff)
+				pr->power.bm_activity |= 0x1;
+			pr->power.bm_activity <<= 1;
+		}
+
+		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
+				  &bm_status, ACPI_MTX_DO_NOT_LOCK);
+		if (bm_status) {
+			pr->power.bm_activity++;
+			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
+					  1, ACPI_MTX_DO_NOT_LOCK);
+		}
+		/*
+		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
+		 * the true state of bus mastering activity; forcing us to
+		 * manually check the BMIDEA bit of each IDE channel.
+		 */
+		else if (errata.piix4.bmisx) {
+			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
+			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
+				pr->power.bm_activity++;
+		}
+
+		pr->power.bm_check_timestamp = jiffies;
+
+		/*
+		 * Apply bus mastering demotion policy.  Automatically demote
+		 * to avoid a faulty transition.  Note that the processor
+		 * won't enter a low-power state during this call (to this
+		 * function) but should upon the next.
+		 *
+		 * TBD: A better policy might be to fall back to the demotion
+		 *      state (use it for this quantum only) instead of
+		 *      demoting -- and rely on duration as our sole demotion
+		 *      qualification.  This may, however, introduce DMA
+		 *      issues (e.g. floppy DMA transfer overrun/underrun).
+		 */
+		if (pr->power.bm_activity & cx->demotion.threshold.bm) {
+			local_irq_enable();
+			next_state = cx->demotion.state;
+			goto end;
+		}
+	}
+
+#ifdef CONFIG_HOTPLUG_CPU
+	/*
+	 * Check for P_LVL2_UP flag before entering C2 and above on
+	 * an SMP system.  We do it here instead of doing it at _CST/P_LVL
+	 * detection phase, to work cleanly with logical CPU hotplug.
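+	 * P_LVL2_UP is the FADT flag saying C2 (and deeper) is safe with
+	 * more than one CPU online; without it, and without _CST, we fall
+	 * back to C1 while additional CPUs are online.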
+ */ + if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && + !pr->flags.has_cst && !acpi_fadt.plvl2_up) + cx = &pr->power.states[ACPI_STATE_C1]; +#endif + + cx->usage++; + + /* + * Sleep: + * ------ + * Invoke the current Cx state to put the processor to sleep. + */ + if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { + clear_thread_flag(TIF_POLLING_NRFLAG); + smp_mb__after_clear_bit(); + if (need_resched()) { + set_thread_flag(TIF_POLLING_NRFLAG); + local_irq_enable(); + return; + } + } + + switch (cx->type) { + + case ACPI_STATE_C1: + /* + * Invoke C1. + * Use the appropriate idle routine, the one that would + * be used without acpi C-states. + */ + if (pm_idle_save) + pm_idle_save(); + else + acpi_safe_halt(); + + /* + * TBD: Can't get time duration while in C1, as resumes + * go to an ISR rather than here. Need to instrument + * base interrupt handler. + */ + sleep_ticks = 0xFFFFFFFF; + break; + + case ACPI_STATE_C2: + /* Get start time (ticks) */ + t1 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Invoke C2 */ + inb(cx->address); + /* Dummy op - must do something useless after P_LVL2 read */ + t2 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Get end time (ticks) */ + t2 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Re-enable interrupts */ + local_irq_enable(); + set_thread_flag(TIF_POLLING_NRFLAG); + /* Compute time (ticks) that we were actually asleep */ + sleep_ticks = + ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; + break; + + case ACPI_STATE_C3: + + if (pr->flags.bm_check) { + if (atomic_inc_return(&c3_cpu_count) == + num_online_cpus()) { + /* + * All CPUs are trying to go to C3 + * Disable bus master arbitration + */ + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, + ACPI_MTX_DO_NOT_LOCK); + } + } else { + /* SMP with no shared cache... Invalidate cache */ + ACPI_FLUSH_CPU_CACHE(); + } + + /* Get start time (ticks) */ + t1 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Invoke C3 */ + inb(cx->address); + /* Dummy op - must do something useless after P_LVL3 read */ + t2 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Get end time (ticks) */ + t2 = inl(acpi_fadt.xpm_tmr_blk.address); + if (pr->flags.bm_check) { + /* Enable bus master arbitration */ + atomic_dec(&c3_cpu_count); + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, + ACPI_MTX_DO_NOT_LOCK); + } + + /* Re-enable interrupts */ + local_irq_enable(); + set_thread_flag(TIF_POLLING_NRFLAG); + /* Compute time (ticks) that we were actually asleep */ + sleep_ticks = + ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; + break; + + default: + local_irq_enable(); + return; + } + + next_state = pr->power.state; + +#ifdef CONFIG_HOTPLUG_CPU + /* Don't do promotion/demotion */ + if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && + !pr->flags.has_cst && !acpi_fadt.plvl2_up) { + next_state = cx; + goto end; + } +#endif + + /* + * Promotion? + * ---------- + * Track the number of longs (time asleep is greater than threshold) + * and promote when the count threshold is reached. Note that bus + * mastering activity may prevent promotions. + * Do not promote above max_cstate. + */ + if (cx->promotion.state && + ((cx->promotion.state - pr->power.states) <= max_cstate)) { + if (sleep_ticks > cx->promotion.threshold.ticks) { + cx->promotion.count++; + cx->demotion.count = 0; + if (cx->promotion.count >= + cx->promotion.threshold.count) { + if (pr->flags.bm_check) { + if (! 
+ (pr->power.bm_activity & cx-> + promotion.threshold.bm)) { + next_state = + cx->promotion.state; + goto end; + } + } else { + next_state = cx->promotion.state; + goto end; + } + } + } + } + + /* + * Demotion? + * --------- + * Track the number of shorts (time asleep is less than time threshold) + * and demote when the usage threshold is reached. + */ + if (cx->demotion.state) { + if (sleep_ticks < cx->demotion.threshold.ticks) { + cx->demotion.count++; + cx->promotion.count = 0; + if (cx->demotion.count >= cx->demotion.threshold.count) { + next_state = cx->demotion.state; + goto end; + } + } + } + + end: + /* + * Demote if current state exceeds max_cstate + */ + if ((pr->power.state - pr->power.states) > max_cstate) { + if (cx->demotion.state) + next_state = cx->demotion.state; + } + + /* + * New Cx State? + * ------------- + * If we're going to start using a new Cx state we must clean up + * from the previous and prepare to use the new. + */ + if (next_state != pr->power.state) + acpi_processor_power_activate(pr, next_state); +} + +static int acpi_processor_set_power_policy(struct acpi_processor *pr) +{ + unsigned int i; + unsigned int state_is_set = 0; + struct acpi_processor_cx *lower = NULL; + struct acpi_processor_cx *higher = NULL; + struct acpi_processor_cx *cx; + + ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy"); + + if (!pr) + return_VALUE(-EINVAL); + + /* + * This function sets the default Cx state policy (OS idle handler). + * Our scheme is to promote quickly to C2 but more conservatively + * to C3. We're favoring C2 for its characteristics of low latency + * (quick response), good power savings, and ability to allow bus + * mastering activity. Note that the Cx state policy is completely + * customizable and can be altered dynamically. 
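+	 * With the defaults chosen below, a state is promoted after 10
+	 * (from C1) or 4 (from C2) consecutive sleeps longer than its own
+	 * latency, and demoted after a single sleep shorter than that
+	 * latency; transitions involving C3 also consult bm_history.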
+ */ + + /* startup state */ + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { + cx = &pr->power.states[i]; + if (!cx->valid) + continue; + + if (!state_is_set) + pr->power.state = cx; + state_is_set++; + break; + } + + if (!state_is_set) + return_VALUE(-ENODEV); + + /* demotion */ + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { + cx = &pr->power.states[i]; + if (!cx->valid) + continue; + + if (lower) { + cx->demotion.state = lower; + cx->demotion.threshold.ticks = cx->latency_ticks; + cx->demotion.threshold.count = 1; + if (cx->type == ACPI_STATE_C3) + cx->demotion.threshold.bm = bm_history; + } + + lower = cx; + } + + /* promotion */ + for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { + cx = &pr->power.states[i]; + if (!cx->valid) + continue; + + if (higher) { + cx->promotion.state = higher; + cx->promotion.threshold.ticks = cx->latency_ticks; + if (cx->type >= ACPI_STATE_C2) + cx->promotion.threshold.count = 4; + else + cx->promotion.threshold.count = 10; + if (higher->type == ACPI_STATE_C3) + cx->promotion.threshold.bm = bm_history; + } + + higher = cx; + } + + return_VALUE(0); +} + +static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) +{ + ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt"); + + if (!pr) + return_VALUE(-EINVAL); + + if (!pr->pblk) + return_VALUE(-ENODEV); + + memset(pr->power.states, 0, sizeof(pr->power.states)); + + /* if info is obtained from pblk/fadt, type equals state */ + pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; + pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; + pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; + + /* the C0 state only exists as a filler in our array, + * and all processors need to support C1 */ + pr->power.states[ACPI_STATE_C0].valid = 1; + pr->power.states[ACPI_STATE_C1].valid = 1; + +#ifndef CONFIG_HOTPLUG_CPU + /* + * Check for P_LVL2_UP flag before entering C2 and above on + * an SMP system. 
+ */ + if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up) + return_VALUE(-ENODEV); +#endif + + /* determine C2 and C3 address from pblk */ + pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; + pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; + + /* determine latencies from FADT */ + pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; + pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "lvl2[0x%08x] lvl3[0x%08x]\n", + pr->power.states[ACPI_STATE_C2].address, + pr->power.states[ACPI_STATE_C3].address)); + + return_VALUE(0); +} + +static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr) +{ + ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1"); + + memset(pr->power.states, 0, sizeof(pr->power.states)); + + /* if info is obtained from pblk/fadt, type equals state */ + pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; + pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; + pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; + + /* the C0 state only exists as a filler in our array, + * and all processors need to support C1 */ + pr->power.states[ACPI_STATE_C0].valid = 1; + pr->power.states[ACPI_STATE_C1].valid = 1; + + return_VALUE(0); +} + +static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) +{ + acpi_status status = 0; + acpi_integer count; + int i; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *cst; + + ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst"); + + if (nocst) + return_VALUE(-ENODEV); + + pr->power.count = 0; + for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) + memset(&(pr->power.states[i]), 0, + sizeof(struct acpi_processor_cx)); + + status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); + return_VALUE(-ENODEV); + } + + cst = (union acpi_object *)buffer.pointer; + + /* There must be at least 2 elements */ + if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "not enough elements in _CST\n")); + status = -EFAULT; + goto end; + } + + count = cst->package.elements[0].integer.value; + + /* Validate number of power states. */ + if (count < 1 || count != cst->package.count - 1) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "count given by _CST is not valid\n")); + status = -EFAULT; + goto end; + } + + /* We support up to ACPI_PROCESSOR_MAX_POWER. */ + if (count > ACPI_PROCESSOR_MAX_POWER) { + printk(KERN_WARNING + "Limiting number of power states to max (%d)\n", + ACPI_PROCESSOR_MAX_POWER); + printk(KERN_WARNING + "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); + count = ACPI_PROCESSOR_MAX_POWER; + } + + /* Tell driver that at least _CST is supported. 
	 */
+	pr->flags.has_cst = 1;
+
+	for (i = 1; i <= count; i++) {
+		union acpi_object *element;
+		union acpi_object *obj;
+		struct acpi_power_register *reg;
+		struct acpi_processor_cx cx;
+
+		memset(&cx, 0, sizeof(cx));
+
+		element = (union acpi_object *)&(cst->package.elements[i]);
+		if (element->type != ACPI_TYPE_PACKAGE)
+			continue;
+
+		if (element->package.count != 4)
+			continue;
+
+		obj = (union acpi_object *)&(element->package.elements[0]);
+
+		if (obj->type != ACPI_TYPE_BUFFER)
+			continue;
+
+		reg = (struct acpi_power_register *)obj->buffer.pointer;
+
+		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
+		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
+			continue;
+
+		cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
+		    0 : reg->address;
+
+		/* There should be an easy way to extract an integer... */
+		obj = (union acpi_object *)&(element->package.elements[1]);
+		if (obj->type != ACPI_TYPE_INTEGER)
+			continue;
+
+		cx.type = obj->integer.value;
+
+		if ((cx.type != ACPI_STATE_C1) &&
+		    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
+			continue;
+
+		if ((cx.type < ACPI_STATE_C1) || (cx.type > ACPI_STATE_C3))
+			continue;
+
+		obj = (union acpi_object *)&(element->package.elements[2]);
+		if (obj->type != ACPI_TYPE_INTEGER)
+			continue;
+
+		cx.latency = obj->integer.value;
+
+		obj = (union acpi_object *)&(element->package.elements[3]);
+		if (obj->type != ACPI_TYPE_INTEGER)
+			continue;
+
+		cx.power = obj->integer.value;
+
+		(pr->power.count)++;
+		memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
+			  pr->power.count));
+
+	/* Validate number of power states discovered */
+	if (pr->power.count < 2)
+		status = -EFAULT;
+
+      end:
+	acpi_os_free(buffer.pointer);
+
+	return_VALUE(status);
+}
+
+static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
+{
+	ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2");
+
+	if (!cx->address)
+		return_VOID;
+
+	/*
+	 * C2 latency must be less than or equal to 100
+	 * microseconds.
+	 */
+	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "latency too large [%d]\n", cx->latency));
+		return_VOID;
+	}
+
+	/*
+	 * Otherwise we've met all of our C2 requirements.
+	 * Normalize the C2 latency to expedite policy
+	 */
+	cx->valid = 1;
+	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+
+	return_VOID;
+}
+
+static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
+					   struct acpi_processor_cx *cx)
+{
+	static int bm_check_flag;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3");
+
+	if (!cx->address)
+		return_VOID;
+
+	/*
+	 * C3 latency must be less than or equal to 1000
+	 * microseconds.
+	 */
+	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "latency too large [%d]\n", cx->latency));
+		return_VOID;
+	}
+
+	/*
+	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
+	 * DMA transfers are used by any ISA device to avoid livelock.
+	 * Note that we could disable Type-F DMA (as recommended by
+	 * the erratum), but this is known to disrupt certain ISA
+	 * devices thus we take the conservative approach.
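+	 * (errata.piix4.fdma is set by the PIIX4 scan in processor_core.c
+	 * when Type-F DMA is enabled on either motherboard channel.)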
+	 */
+	else if (errata.piix4.fdma) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "C3 not supported on PIIX4 with Type-F DMA\n"));
+		return_VOID;
+	}
+
+	/* All the logic here assumes flags.bm_check is same across all CPUs */
+	if (!bm_check_flag) {
+		/* Determine whether bm_check is needed based on CPU */
+		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
+		bm_check_flag = pr->flags.bm_check;
+	} else {
+		pr->flags.bm_check = bm_check_flag;
+	}
+
+	if (pr->flags.bm_check) {
+		/* bus mastering control is necessary */
+		if (!pr->flags.bm_control) {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "C3 support requires bus mastering control\n"));
+			return_VOID;
+		}
+	} else {
+		/*
+		 * WBINVD should be set in fadt, for C3 state to be
+		 * supported when bm_check is not required.
+		 */
+		if (acpi_fadt.wb_invd != 1) {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "Cache invalidation should work properly"
+					  " for C3 to be enabled on SMP systems\n"));
+			return_VOID;
+		}
+		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD,
+				  0, ACPI_MTX_DO_NOT_LOCK);
+	}
+
+	/*
+	 * Otherwise we've met all of our C3 requirements.
+	 * Normalize the C3 latency to expedite policy.  Enable
+	 * checking of bus mastering status (bm_check) so we can
+	 * use this in our C3 policy
+	 */
+	cx->valid = 1;
+	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+
+	return_VOID;
+}
+
+static int acpi_processor_power_verify(struct acpi_processor *pr)
+{
+	unsigned int i;
+	unsigned int working = 0;
+
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+		struct acpi_processor_cx *cx = &pr->power.states[i];
+
+		switch (cx->type) {
+		case ACPI_STATE_C1:
+			cx->valid = 1;
+			break;
+
+		case ACPI_STATE_C2:
+			acpi_processor_power_verify_c2(cx);
+			break;
+
+		case ACPI_STATE_C3:
+			acpi_processor_power_verify_c3(pr, cx);
+			break;
+		}
+
+		if (cx->valid)
+			working++;
+	}
+
+	return (working);
+}
+
+static int acpi_processor_get_power_info(struct acpi_processor *pr)
+{
+	unsigned int i;
+	int result;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");
+
+	/* NOTE: the idle thread may not be running while calling
+	 * this function */
+
+	result = acpi_processor_get_power_info_cst(pr);
+	if (result == -ENODEV)
+		result = acpi_processor_get_power_info_fadt(pr);
+
+	if ((result) || (acpi_processor_power_verify(pr) < 2))
+		result = acpi_processor_get_power_info_default_c1(pr);
+
+	/*
+	 * Set Default Policy
+	 * ------------------
+	 * Now that we know which states are supported, set the default
+	 * policy.  Note that this policy can be changed dynamically
+	 * (e.g. encourage deeper sleeps to conserve battery life when
+	 * not on AC).
+	 */
+	result = acpi_processor_set_power_policy(pr);
+	if (result)
+		return_VALUE(result);
+
+	/*
+	 * if one state of type C2 or C3 is available, mark this
+	 * CPU as being "idle manageable"
+	 */
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
+		if (pr->power.states[i].valid) {
+			pr->power.count = i;
+			if (pr->power.states[i].type >= ACPI_STATE_C2)
+				pr->flags.power = 1;
+		}
+	}
+
+	return_VALUE(0);
+}
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int result = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");
+
+	if (!pr)
+		return_VALUE(-EINVAL);
+
+	if (nocst) {
+		return_VALUE(-ENODEV);
+	}
+
+	if (!pr->flags.power_setup_done)
+		return_VALUE(-ENODEV);
+
+	/* Fall back to the default idle loop */
+	pm_idle = pm_idle_save;
+	synchronize_sched();	/* Relies on interrupts forcing exit from idle.
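+				 * Once it returns, no CPU is still running
+				 * the old acpi_processor_idle() handler.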
				 */
+
+	pr->flags.power = 0;
+	result = acpi_processor_get_power_info(pr);
+	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
+		pm_idle = acpi_processor_idle;
+
+	return_VALUE(result);
+}
+
+/* proc interface */
+
+static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
+{
+	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
+	unsigned int i;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");
+
+	if (!pr)
+		goto end;
+
+	seq_printf(seq, "active state:            C%zd\n"
+		   "max_cstate:              C%d\n"
+		   "bus master activity:     %08x\n",
+		   pr->power.state ? pr->power.state - pr->power.states : 0,
+		   max_cstate, (unsigned)pr->power.bm_activity);
+
+	seq_puts(seq, "states:\n");
+
+	for (i = 1; i <= pr->power.count; i++) {
+		seq_printf(seq, "   %cC%d:                  ",
+			   (&pr->power.states[i] ==
+			    pr->power.state ? '*' : ' '), i);
+
+		if (!pr->power.states[i].valid) {
+			seq_puts(seq, "<not supported>\n");
+			continue;
+		}
+
+		switch (pr->power.states[i].type) {
+		case ACPI_STATE_C1:
+			seq_printf(seq, "type[C1] ");
+			break;
+		case ACPI_STATE_C2:
+			seq_printf(seq, "type[C2] ");
+			break;
+		case ACPI_STATE_C3:
+			seq_printf(seq, "type[C3] ");
+			break;
+		default:
+			seq_printf(seq, "type[--] ");
+			break;
+		}
+
+		if (pr->power.states[i].promotion.state)
+			seq_printf(seq, "promotion[C%zd] ",
+				   (pr->power.states[i].promotion.state -
+				    pr->power.states));
+		else
+			seq_puts(seq, "promotion[--] ");
+
+		if (pr->power.states[i].demotion.state)
+			seq_printf(seq, "demotion[C%zd] ",
+				   (pr->power.states[i].demotion.state -
+				    pr->power.states));
+		else
+			seq_puts(seq, "demotion[--] ");
+
+		seq_printf(seq, "latency[%03d] usage[%08d]\n",
+			   pr->power.states[i].latency,
+			   pr->power.states[i].usage);
+	}
+
+      end:
+	return_VALUE(0);
+}
+
+static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
+{
+	return single_open(file, acpi_processor_power_seq_show,
+			   PDE(inode)->data);
+}
+
+static struct file_operations acpi_processor_power_fops = {
+	.open = acpi_processor_power_open_fs,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+int acpi_processor_power_init(struct acpi_processor *pr,
+			      struct acpi_device *device)
+{
+	acpi_status status = 0;
+	static int first_run = 0;
+	struct proc_dir_entry *entry = NULL;
+	unsigned int i;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_power_init");
+
+	if (!first_run) {
+		dmi_check_system(processor_power_dmi_table);
+		if (max_cstate < ACPI_C_STATES_MAX)
+			printk(KERN_NOTICE
+			       "ACPI: processor limited to max C-state %d\n",
+			       max_cstate);
+		first_run++;
+	}
+
+	if (!pr)
+		return_VALUE(-EINVAL);
+
+	if (acpi_fadt.cst_cnt && !nocst) {
+		status =
+		    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
+		if (ACPI_FAILURE(status)) {
+			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+					  "Notifying BIOS of _CST ability failed\n"));
+		}
+	}
+
+	acpi_processor_power_init_pdc(&(pr->power), pr->id);
+	acpi_processor_set_pdc(pr, pr->power.pdc);
+	acpi_processor_get_power_info(pr);
+
+	/*
+	 * Install the idle handler if processor power management is supported.
+	 * Note that the previously set idle handler is kept in place on
+	 * platforms that only support C1.
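+	 * Only the boot CPU (pr->id == 0) swaps pm_idle; the installed
+	 * handler then serves every online CPU via processors[].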
+ */ + if ((pr->flags.power) && (!boot_option_idle_override)) { + printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id); + for (i = 1; i <= pr->power.count; i++) + if (pr->power.states[i].valid) + printk(" C%d[C%d]", i, + pr->power.states[i].type); + printk(")\n"); + + if (pr->id == 0) { + pm_idle_save = pm_idle; + pm_idle = acpi_processor_idle; + } + } + + /* 'power' [R] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER, + S_IRUGO, acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_POWER)); + else { + entry->proc_fops = &acpi_processor_power_fops; + entry->data = acpi_driver_data(device); + entry->owner = THIS_MODULE; + } + + pr->flags.power_setup_done = 1; + + return_VALUE(0); +} + +int acpi_processor_power_exit(struct acpi_processor *pr, + struct acpi_device *device) +{ + ACPI_FUNCTION_TRACE("acpi_processor_power_exit"); + + pr->flags.power_setup_done = 0; + + if (acpi_device_dir(device)) + remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, + acpi_device_dir(device)); + + /* Unregister the idle handler when processor #0 is removed. */ + if (pr->id == 0) { + pm_idle = pm_idle_save; + + /* + * We are about to unload the current idle thread pm callback + * (pm_idle), Wait for all processors to update cached/local + * copies of pm_idle before proceeding. + */ + cpu_idle_wait(); + } + + return_VALUE(0); +} diff --git a/drivers/acpi/drivers/legacy/processor_perflib.c b/drivers/acpi/drivers/legacy/processor_perflib.c new file mode 100644 index 0000000..22c7bb6 --- /dev/null +++ b/drivers/acpi/drivers/legacy/processor_perflib.c @@ -0,0 +1,624 @@ +/* + * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $) + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2004 Dominik Brodowski + * Copyright (C) 2004 Anil S Keshavamurthy + * - Added processor hotplug support + * + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + */ + +#include +#include +#include +#include + +#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF +#include +#include + +#include +#endif + +#include +#include + +#define ACPI_PROCESSOR_COMPONENT 0x01000000 +#define ACPI_PROCESSOR_CLASS "processor" +#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" +#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" +#define _COMPONENT ACPI_PROCESSOR_COMPONENT +ACPI_MODULE_NAME("acpi_processor") + +static DECLARE_MUTEX(performance_sem); + +/* + * _PPC support is implemented as a CPUfreq policy notifier: + * This means each time a CPUfreq driver registered also with + * the ACPI core is asked to change the speed policy, the maximum + * value is adjusted so that it is within the platform limit. 
+ * + * Also, when a new platform limit value is detected, the CPUfreq + * policy is adjusted accordingly. + */ + +#define PPC_REGISTERED 1 +#define PPC_IN_USE 2 + +static int acpi_processor_ppc_status = 0; + +static int acpi_processor_ppc_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct cpufreq_policy *policy = data; + struct acpi_processor *pr; + unsigned int ppc = 0; + + down(&performance_sem); + + if (event != CPUFREQ_INCOMPATIBLE) + goto out; + + pr = processors[policy->cpu]; + if (!pr || !pr->performance) + goto out; + + ppc = (unsigned int)pr->performance_platform_limit; + if (!ppc) + goto out; + + if (ppc > pr->performance->state_count) + goto out; + + cpufreq_verify_within_limits(policy, 0, + pr->performance->states[ppc]. + core_frequency * 1000); + + out: + up(&performance_sem); + + return 0; +} + +static struct notifier_block acpi_ppc_notifier_block = { + .notifier_call = acpi_processor_ppc_notifier, +}; + +static int acpi_processor_get_platform_limit(struct acpi_processor *pr) +{ + acpi_status status = 0; + unsigned long ppc = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit"); + + if (!pr) + return_VALUE(-EINVAL); + + /* + * _PPC indicates the maximum state currently supported by the platform + * (e.g. 0 = states 0..n; 1 = states 1..n; etc. + */ + status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc); + + if (status != AE_NOT_FOUND) + acpi_processor_ppc_status |= PPC_IN_USE; + + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PPC\n")); + return_VALUE(-ENODEV); + } + + pr->performance_platform_limit = (int)ppc; + + return_VALUE(0); +} + +int acpi_processor_ppc_has_changed(struct acpi_processor *pr) +{ + int ret = acpi_processor_get_platform_limit(pr); + if (ret < 0) + return (ret); + else + return cpufreq_update_policy(pr->id); +} + +void acpi_processor_ppc_init(void) +{ + if (!cpufreq_register_notifier + (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER)) + acpi_processor_ppc_status |= PPC_REGISTERED; + else + printk(KERN_DEBUG + "Warning: Processor Platform Limit not supported.\n"); +} + +void acpi_processor_ppc_exit(void) +{ + if (acpi_processor_ppc_status & PPC_REGISTERED) + cpufreq_unregister_notifier(&acpi_ppc_notifier_block, + CPUFREQ_POLICY_NOTIFIER); + + acpi_processor_ppc_status &= ~PPC_REGISTERED; +} + +static int acpi_processor_get_performance_control(struct acpi_processor *pr) +{ + int result = 0; + acpi_status status = 0; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *pct = NULL; + union acpi_object obj = { 0 }; + + ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control"); + + status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PCT\n")); + return_VALUE(-ENODEV); + } + + pct = (union acpi_object *)buffer.pointer; + if (!pct || (pct->type != ACPI_TYPE_PACKAGE) + || (pct->package.count != 2)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data\n")); + result = -EFAULT; + goto end; + } + + /* + * control_register + */ + + obj = pct->package.elements[0]; + + if ((obj.type != ACPI_TYPE_BUFFER) + || (obj.buffer.length < sizeof(struct acpi_pct_register)) + || (obj.buffer.pointer == NULL)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid _PCT data (control_register)\n")); + result = -EFAULT; + goto end; + } + memcpy(&pr->performance->control_register, obj.buffer.pointer, + sizeof(struct acpi_pct_register)); + + /* + * 
status_register + */ + + obj = pct->package.elements[1]; + + if ((obj.type != ACPI_TYPE_BUFFER) + || (obj.buffer.length < sizeof(struct acpi_pct_register)) + || (obj.buffer.pointer == NULL)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid _PCT data (status_register)\n")); + result = -EFAULT; + goto end; + } + + memcpy(&pr->performance->status_register, obj.buffer.pointer, + sizeof(struct acpi_pct_register)); + + end: + acpi_os_free(buffer.pointer); + + return_VALUE(result); +} + +static int acpi_processor_get_performance_states(struct acpi_processor *pr) +{ + int result = 0; + acpi_status status = AE_OK; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" }; + struct acpi_buffer state = { 0, NULL }; + union acpi_object *pss = NULL; + int i; + + ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states"); + + status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PSS\n")); + return_VALUE(-ENODEV); + } + + pss = (union acpi_object *)buffer.pointer; + if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n")); + result = -EFAULT; + goto end; + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n", + pss->package.count)); + + pr->performance->state_count = pss->package.count; + pr->performance->states = + kmalloc(sizeof(struct acpi_processor_px) * pss->package.count, + GFP_KERNEL); + if (!pr->performance->states) { + result = -ENOMEM; + goto end; + } + + for (i = 0; i < pr->performance->state_count; i++) { + + struct acpi_processor_px *px = &(pr->performance->states[i]); + + state.length = sizeof(struct acpi_processor_px); + state.pointer = px; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i)); + + status = acpi_extract_package(&(pss->package.elements[i]), + &format, &state); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid _PSS data\n")); + result = -EFAULT; + kfree(pr->performance->states); + goto end; + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n", + i, + (u32) px->core_frequency, + (u32) px->power, + (u32) px->transition_latency, + (u32) px->bus_master_latency, + (u32) px->control, (u32) px->status)); + + if (!px->core_frequency) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid _PSS data: freq is zero\n")); + result = -EFAULT; + kfree(pr->performance->states); + goto end; + } + } + + end: + acpi_os_free(buffer.pointer); + + return_VALUE(result); +} + +static int acpi_processor_get_performance_info(struct acpi_processor *pr) +{ + int result = 0; + acpi_status status = AE_OK; + acpi_handle handle = NULL; + + ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info"); + + if (!pr || !pr->performance || !pr->handle) + return_VALUE(-EINVAL); + + acpi_processor_set_pdc(pr, pr->performance->pdc); + + status = acpi_get_handle(pr->handle, "_PCT", &handle); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "ACPI-based processor performance control unavailable\n")); + return_VALUE(-ENODEV); + } + + result = acpi_processor_get_performance_control(pr); + if (result) + return_VALUE(result); + + result = acpi_processor_get_performance_states(pr); + if (result) + return_VALUE(result); + + result = acpi_processor_get_platform_limit(pr); + if (result) + return_VALUE(result); + + return_VALUE(0); +} + +int 
acpi_processor_notify_smm(struct module *calling_module)
+{
+	acpi_status status;
+	static int is_done = 0;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_notify_smm");
+
+	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
+		return_VALUE(-EBUSY);
+
+	if (!try_module_get(calling_module))
+		return_VALUE(-EINVAL);
+
+	/* is_done is set to negative if an error occurred,
+	 * and to positive if _no_ error occurred, but SMM
+	 * was already notified. This avoids double notification
+	 * which might lead to unexpected results...
+	 */
+	if (is_done > 0) {
+		module_put(calling_module);
+		return_VALUE(0);
+	} else if (is_done < 0) {
+		module_put(calling_module);
+		return_VALUE(is_done);
+	}
+
+	is_done = -EIO;
+
+	/* Can't write pstate_cnt to smi_cmd if either value is zero */
+	if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
+		module_put(calling_module);
+		return_VALUE(0);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
+			  acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));
+
+	/* FADT v1 doesn't support pstate_cnt, many BIOS vendors use
+	 * it anyway, so we need to support it... */
+	if (acpi_fadt_is_v1) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Using v1.0 FADT reserved value for pstate_cnt\n"));
+	}
+
+	status = acpi_os_write_port(acpi_fadt.smi_cmd,
+				    (u32) acpi_fadt.pstate_cnt, 8);
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "Failed to write pstate_cnt [0x%x] to "
+				  "smi_cmd [0x%x]\n", acpi_fadt.pstate_cnt,
+				  acpi_fadt.smi_cmd));
+		module_put(calling_module);
+		return_VALUE(status);
+	}
+
+	/* Success. If there's no _PPC, we need to fear nothing, so
+	 * we can allow the cpufreq driver to be rmmod'ed. */
+	is_done = 1;
+
+	if (!(acpi_processor_ppc_status & PPC_IN_USE))
+		module_put(calling_module);
+
+	return_VALUE(0);
+}
+
+EXPORT_SYMBOL(acpi_processor_notify_smm);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
+/* /proc/acpi/processor/../performance interface (DEPRECATED) */
+
+static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
+static struct file_operations acpi_processor_perf_fops = {
+	.open = acpi_processor_perf_open_fs,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
+{
+	struct acpi_processor *pr = (struct acpi_processor *)seq->private;
+	int i;
+
+	ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show");
+
+	if (!pr)
+		goto end;
+
+	if (!pr->performance) {
+		seq_puts(seq, "<not supported>\n");
+		goto end;
+	}
+
+	seq_printf(seq, "state count: %d\n"
+		   "active state: P%d\n",
+		   pr->performance->state_count, pr->performance->state);
+
+	seq_puts(seq, "states:\n");
+	for (i = 0; i < pr->performance->state_count; i++)
+		seq_printf(seq,
+			   " %cP%d: %d MHz, %d mW, %d uS\n",
+			   (i == pr->performance->state ?
'*' : ' '), i, + (u32) pr->performance->states[i].core_frequency, + (u32) pr->performance->states[i].power, + (u32) pr->performance->states[i].transition_latency); + + end: + return_VALUE(0); +} + +static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, acpi_processor_perf_seq_show, + PDE(inode)->data); +} + +static ssize_t +acpi_processor_write_performance(struct file *file, + const char __user * buffer, + size_t count, loff_t * data) +{ + int result = 0; + struct seq_file *m = (struct seq_file *)file->private_data; + struct acpi_processor *pr = (struct acpi_processor *)m->private; + struct acpi_processor_performance *perf; + char state_string[12] = { '\0' }; + unsigned int new_state = 0; + struct cpufreq_policy policy; + + ACPI_FUNCTION_TRACE("acpi_processor_write_performance"); + + if (!pr || (count > sizeof(state_string) - 1)) + return_VALUE(-EINVAL); + + perf = pr->performance; + if (!perf) + return_VALUE(-EINVAL); + + if (copy_from_user(state_string, buffer, count)) + return_VALUE(-EFAULT); + + state_string[count] = '\0'; + new_state = simple_strtoul(state_string, NULL, 0); + + if (new_state >= perf->state_count) + return_VALUE(-EINVAL); + + cpufreq_get_policy(&policy, pr->id); + + policy.cpu = pr->id; + policy.min = perf->states[new_state].core_frequency * 1000; + policy.max = perf->states[new_state].core_frequency * 1000; + + result = cpufreq_set_policy(&policy); + if (result) + return_VALUE(result); + + return_VALUE(count); +} + +static void acpi_cpufreq_add_file(struct acpi_processor *pr) +{ + struct proc_dir_entry *entry = NULL; + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_cpufreq_addfile"); + + if (acpi_bus_get_device(pr->handle, &device)) + return_VOID; + + /* add file 'performance' [R/W] */ + entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE, + S_IFREG | S_IRUGO | S_IWUSR, + acpi_device_dir(device)); + if (!entry) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to create '%s' fs entry\n", + ACPI_PROCESSOR_FILE_PERFORMANCE)); + else { + entry->proc_fops = &acpi_processor_perf_fops; + entry->proc_fops->write = acpi_processor_write_performance; + entry->data = acpi_driver_data(device); + entry->owner = THIS_MODULE; + } + return_VOID; +} + +static void acpi_cpufreq_remove_file(struct acpi_processor *pr) +{ + struct acpi_device *device = NULL; + + ACPI_FUNCTION_TRACE("acpi_cpufreq_addfile"); + + if (acpi_bus_get_device(pr->handle, &device)) + return_VOID; + + /* remove file 'performance' */ + remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE, + acpi_device_dir(device)); + + return_VOID; +} + +#else +static void acpi_cpufreq_add_file(struct acpi_processor *pr) +{ + return; +} +static void acpi_cpufreq_remove_file(struct acpi_processor *pr) +{ + return; +} +#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */ + +int +acpi_processor_register_performance(struct acpi_processor_performance + *performance, unsigned int cpu) +{ + struct acpi_processor *pr; + + ACPI_FUNCTION_TRACE("acpi_processor_register_performance"); + + if (!(acpi_processor_ppc_status & PPC_REGISTERED)) + return_VALUE(-EINVAL); + + down(&performance_sem); + + pr = processors[cpu]; + if (!pr) { + up(&performance_sem); + return_VALUE(-ENODEV); + } + + if (pr->performance) { + up(&performance_sem); + return_VALUE(-EBUSY); + } + + pr->performance = performance; + + if (acpi_processor_get_performance_info(pr)) { + pr->performance = NULL; + up(&performance_sem); + return_VALUE(-EIO); + } + + acpi_cpufreq_add_file(pr); + + up(&performance_sem); + 
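	/*
	 * Success: the table now stays bound to this CPU until the owner
	 * calls acpi_processor_unregister_performance(), which frees the
	 * states array and clears pr->performance again.
	 */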
return_VALUE(0); +} + +EXPORT_SYMBOL(acpi_processor_register_performance); + +void +acpi_processor_unregister_performance(struct acpi_processor_performance + *performance, unsigned int cpu) +{ + struct acpi_processor *pr; + + ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance"); + + down(&performance_sem); + + pr = processors[cpu]; + if (!pr) { + up(&performance_sem); + return_VOID; + } + + kfree(pr->performance->states); + pr->performance = NULL; + + acpi_cpufreq_remove_file(pr); + + up(&performance_sem); + + return_VOID; +} + +EXPORT_SYMBOL(acpi_processor_unregister_performance); diff --git a/drivers/acpi/drivers/legacy/processor_thermal.c b/drivers/acpi/drivers/legacy/processor_thermal.c new file mode 100644 index 0000000..dc9817c --- /dev/null +++ b/drivers/acpi/drivers/legacy/processor_thermal.c @@ -0,0 +1,399 @@ +/* + * processor_thermal.c - Passive cooling submodule of the ACPI processor driver + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2004 Dominik Brodowski + * Copyright (C) 2004 Anil S Keshavamurthy + * - Added processor hotplug support + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#define ACPI_PROCESSOR_COMPONENT 0x01000000 +#define ACPI_PROCESSOR_CLASS "processor" +#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" +#define _COMPONENT ACPI_PROCESSOR_COMPONENT +ACPI_MODULE_NAME("acpi_processor") + +/* -------------------------------------------------------------------------- + Limit Interface + -------------------------------------------------------------------------- */ +static int acpi_processor_apply_limit(struct acpi_processor *pr) +{ + int result = 0; + u16 px = 0; + u16 tx = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_apply_limit"); + + if (!pr) + return_VALUE(-EINVAL); + + if (!pr->flags.limit) + return_VALUE(-ENODEV); + + if (pr->flags.throttling) { + if (pr->limit.user.tx > tx) + tx = pr->limit.user.tx; + if (pr->limit.thermal.tx > tx) + tx = pr->limit.thermal.tx; + + result = acpi_processor_set_throttling(pr, tx); + if (result) + goto end; + } + + pr->limit.state.px = px; + pr->limit.state.tx = tx; + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Processor [%d] limit set to (P%d:T%d)\n", pr->id, + pr->limit.state.px, pr->limit.state.tx)); + + end: + if (result) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to set limit\n")); + + return_VALUE(result); +} + +#ifdef CONFIG_CPU_FREQ + +/* If a passive cooling situation is detected, primarily CPUfreq is used, as it + * offers (in most cases) voltage scaling in addition to frequency scaling, and + * thus a cubic (instead of linear) reduction of energy. Also, we allow for + * _any_ cpufreq driver and not only the acpi-cpufreq driver. + */ + +static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS]; +static unsigned int acpi_thermal_cpufreq_is_init = 0; + +static int cpu_has_cpufreq(unsigned int cpu) +{ + struct cpufreq_policy policy; + if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu)) + return 0; + return 1; +} + +static int acpi_thermal_cpufreq_increase(unsigned int cpu) +{ + if (!cpu_has_cpufreq(cpu)) + return -ENODEV; + + if (cpufreq_thermal_reduction_pctg[cpu] < 60) { + cpufreq_thermal_reduction_pctg[cpu] += 20; + cpufreq_update_policy(cpu); + return 0; + } + + return -ERANGE; +} + +static int acpi_thermal_cpufreq_decrease(unsigned int cpu) +{ + if (!cpu_has_cpufreq(cpu)) + return -ENODEV; + + if (cpufreq_thermal_reduction_pctg[cpu] > 20) + cpufreq_thermal_reduction_pctg[cpu] -= 20; + else + cpufreq_thermal_reduction_pctg[cpu] = 0; + cpufreq_update_policy(cpu); + /* We reached max freq again and can leave passive mode */ + return !cpufreq_thermal_reduction_pctg[cpu]; +} + +static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct cpufreq_policy *policy = data; + unsigned long max_freq = 0; + + if (event != CPUFREQ_ADJUST) + goto out; + + max_freq = + (policy->cpuinfo.max_freq * + (100 - cpufreq_thermal_reduction_pctg[policy->cpu])) / 100; + + cpufreq_verify_within_limits(policy, 0, max_freq); + + out: + return 0; +} + +static struct notifier_block acpi_thermal_cpufreq_notifier_block = { + .notifier_call = acpi_thermal_cpufreq_notifier, +}; + +void acpi_thermal_cpufreq_init(void) +{ + int i; + + for (i = 0; i < NR_CPUS; i++) + cpufreq_thermal_reduction_pctg[i] = 0; + + i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, + CPUFREQ_POLICY_NOTIFIER); + if (!i) + acpi_thermal_cpufreq_is_init = 1; +} + +void 
acpi_thermal_cpufreq_exit(void) +{ + if (acpi_thermal_cpufreq_is_init) + cpufreq_unregister_notifier + (&acpi_thermal_cpufreq_notifier_block, + CPUFREQ_POLICY_NOTIFIER); + + acpi_thermal_cpufreq_is_init = 0; +} + +#else /* ! CONFIG_CPU_FREQ */ + +static int acpi_thermal_cpufreq_increase(unsigned int cpu) +{ + return -ENODEV; +} +static int acpi_thermal_cpufreq_decrease(unsigned int cpu) +{ + return -ENODEV; +} + +#endif + +int acpi_processor_set_thermal_limit(acpi_handle handle, int type) +{ + int result = 0; + struct acpi_processor *pr = NULL; + struct acpi_device *device = NULL; + int tx = 0, max_tx_px = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_set_thermal_limit"); + + if ((type < ACPI_PROCESSOR_LIMIT_NONE) + || (type > ACPI_PROCESSOR_LIMIT_DECREMENT)) + return_VALUE(-EINVAL); + + result = acpi_bus_get_device(handle, &device); + if (result) + return_VALUE(result); + + pr = (struct acpi_processor *)acpi_driver_data(device); + if (!pr) + return_VALUE(-ENODEV); + + /* Thermal limits are always relative to the current Px/Tx state. */ + if (pr->flags.throttling) + pr->limit.thermal.tx = pr->throttling.state; + + /* + * Our default policy is to only use throttling at the lowest + * performance state. + */ + + tx = pr->limit.thermal.tx; + + switch (type) { + + case ACPI_PROCESSOR_LIMIT_NONE: + do { + result = acpi_thermal_cpufreq_decrease(pr->id); + } while (!result); + tx = 0; + break; + + case ACPI_PROCESSOR_LIMIT_INCREMENT: + /* if going up: P-states first, T-states later */ + + result = acpi_thermal_cpufreq_increase(pr->id); + if (!result) + goto end; + else if (result == -ERANGE) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "At maximum performance state\n")); + + if (pr->flags.throttling) { + if (tx == (pr->throttling.state_count - 1)) + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "At maximum throttling state\n")); + else + tx++; + } + break; + + case ACPI_PROCESSOR_LIMIT_DECREMENT: + /* if going down: T-states first, P-states later */ + + if (pr->flags.throttling) { + if (tx == 0) { + max_tx_px = 1; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "At minimum throttling state\n")); + } else { + tx--; + goto end; + } + } + + result = acpi_thermal_cpufreq_decrease(pr->id); + if (result) { + /* + * We only could get -ERANGE, 1 or 0. + * In the first two cases we reached max freq again. 
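For a concrete view of the 20%-step arithmetic driving this path, here is a standalone sketch (the 2 GHz base frequency is just an example value):

        #include <stdio.h>

        int main(void)
        {
                unsigned int cpuinfo_max_freq = 2000000;        /* kHz, example */
                unsigned int pctg;

                /* The thermal reduction moves through 0/20/40/60 in 20-point steps. */
                for (pctg = 0; pctg <= 60; pctg += 20)
                        printf("reduction %2u%% -> cap %u kHz\n", pctg,
                               (cpuinfo_max_freq * (100 - pctg)) / 100);

                /*
                 * acpi_thermal_cpufreq_decrease() returns !pctg afterwards,
                 * so a return of 1 means the cap is fully lifted and the
                 * caller may leave passive cooling mode.
                 */
                return 0;
        }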
+ */ + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "At minimum performance state\n")); + max_tx_px = 1; + } else + max_tx_px = 0; + + break; + } + + end: + if (pr->flags.throttling) { + pr->limit.thermal.px = 0; + pr->limit.thermal.tx = tx; + + result = acpi_processor_apply_limit(pr); + if (result) + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Unable to set thermal limit\n")); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n", + pr->limit.thermal.px, pr->limit.thermal.tx)); + } else + result = 0; + if (max_tx_px) + return_VALUE(1); + else + return_VALUE(result); +} + +int acpi_processor_get_limit_info(struct acpi_processor *pr) +{ + ACPI_FUNCTION_TRACE("acpi_processor_get_limit_info"); + + if (!pr) + return_VALUE(-EINVAL); + + if (pr->flags.throttling) + pr->flags.limit = 1; + + return_VALUE(0); +} + +/* /proc interface */ + +static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset) +{ + struct acpi_processor *pr = (struct acpi_processor *)seq->private; + + ACPI_FUNCTION_TRACE("acpi_processor_limit_seq_show"); + + if (!pr) + goto end; + + if (!pr->flags.limit) { + seq_puts(seq, "\n"); + goto end; + } + + seq_printf(seq, "active limit: P%d:T%d\n" + "user limit: P%d:T%d\n" + "thermal limit: P%d:T%d\n", + pr->limit.state.px, pr->limit.state.tx, + pr->limit.user.px, pr->limit.user.tx, + pr->limit.thermal.px, pr->limit.thermal.tx); + + end: + return_VALUE(0); +} + +static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, acpi_processor_limit_seq_show, + PDE(inode)->data); +} + +ssize_t acpi_processor_write_limit(struct file * file, + const char __user * buffer, + size_t count, loff_t * data) +{ + int result = 0; + struct seq_file *m = (struct seq_file *)file->private_data; + struct acpi_processor *pr = (struct acpi_processor *)m->private; + char limit_string[25] = { '\0' }; + int px = 0; + int tx = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_write_limit"); + + if (!pr || (count > sizeof(limit_string) - 1)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid argument\n")); + return_VALUE(-EINVAL); + } + + if (copy_from_user(limit_string, buffer, count)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data\n")); + return_VALUE(-EFAULT); + } + + limit_string[count] = '\0'; + + if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data format\n")); + return_VALUE(-EINVAL); + } + + if (pr->flags.throttling) { + if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid tx\n")); + return_VALUE(-EINVAL); + } + pr->limit.user.tx = tx; + } + + result = acpi_processor_apply_limit(pr); + + return_VALUE(count); +} + +struct file_operations acpi_processor_limit_fops = { + .open = acpi_processor_limit_open_fs, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; diff --git a/drivers/acpi/drivers/legacy/processor_throttling.c b/drivers/acpi/drivers/legacy/processor_throttling.c new file mode 100644 index 0000000..74a52d4 --- /dev/null +++ b/drivers/acpi/drivers/legacy/processor_throttling.c @@ -0,0 +1,342 @@ +/* + * processor_throttling.c - Throttling submodule of the ACPI processor driver + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * Copyright (C) 2004 Dominik Brodowski + * Copyright (C) 2004 Anil S Keshavamurthy + * - Added processor hotplug support + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute 
it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#define ACPI_PROCESSOR_COMPONENT 0x01000000 +#define ACPI_PROCESSOR_CLASS "processor" +#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" +#define _COMPONENT ACPI_PROCESSOR_COMPONENT +ACPI_MODULE_NAME("acpi_processor") + +/* -------------------------------------------------------------------------- + Throttling Control + -------------------------------------------------------------------------- */ +static int acpi_processor_get_throttling(struct acpi_processor *pr) +{ + int state = 0; + u32 value = 0; + u32 duty_mask = 0; + u32 duty_value = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_get_throttling"); + + if (!pr) + return_VALUE(-EINVAL); + + if (!pr->flags.throttling) + return_VALUE(-ENODEV); + + pr->throttling.state = 0; + + duty_mask = pr->throttling.state_count - 1; + + duty_mask <<= pr->throttling.duty_offset; + + local_irq_disable(); + + value = inl(pr->throttling.address); + + /* + * Compute the current throttling state when throttling is enabled + * (bit 4 is on). + */ + if (value & 0x10) { + duty_value = value & duty_mask; + duty_value >>= pr->throttling.duty_offset; + + if (duty_value) + state = pr->throttling.state_count - duty_value; + } + + pr->throttling.state = state; + + local_irq_enable(); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Throttling state is T%d (%d%% throttling applied)\n", + state, pr->throttling.states[state].performance)); + + return_VALUE(0); +} + +int acpi_processor_set_throttling(struct acpi_processor *pr, int state) +{ + u32 value = 0; + u32 duty_mask = 0; + u32 duty_value = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_set_throttling"); + + if (!pr) + return_VALUE(-EINVAL); + + if ((state < 0) || (state > (pr->throttling.state_count - 1))) + return_VALUE(-EINVAL); + + if (!pr->flags.throttling) + return_VALUE(-ENODEV); + + if (state == pr->throttling.state) + return_VALUE(0); + + /* + * Calculate the duty_value and duty_mask. + */ + if (state) { + duty_value = pr->throttling.state_count - state; + + duty_value <<= pr->throttling.duty_offset; + + /* Used to clear all duty_value bits */ + duty_mask = pr->throttling.state_count - 1; + + duty_mask <<= acpi_fadt.duty_offset; + duty_mask = ~duty_mask; + } + + local_irq_disable(); + + /* + * Disable throttling by writing a 0 to bit 4. Note that we must + * turn it off before you can change the duty_value. + */ + value = inl(pr->throttling.address); + if (value & 0x10) { + value &= 0xFFFFFFEF; + outl(value, pr->throttling.address); + } + + /* + * Write the new duty_value and then enable throttling. Note + * that a state value of 0 leaves throttling disabled. 
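As a worked example of the duty-field arithmetic above, here is a standalone sketch (duty_width = 3 and duty_offset = 1 are assumed example values, not taken from any particular chipset specification):

        #include <stdio.h>

        int main(void)
        {
                unsigned int duty_width = 3, duty_offset = 1;
                unsigned int state_count = 1 << duty_width;     /* 8 T-states */
                unsigned int state = 2;                         /* target T2 */

                /* Mirrors the arithmetic in acpi_processor_set_throttling(). */
                unsigned int duty_value = (state_count - state) << duty_offset;
                unsigned int duty_mask = ~((state_count - 1) << duty_offset);

                printf("T%u: clear with mask 0x%x, write duty 0x%x, then set bit 4\n",
                       state, duty_mask, duty_value);
                return 0;
        }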
+ */ + if (state) { + value &= duty_mask; + value |= duty_value; + outl(value, pr->throttling.address); + + value |= 0x00000010; + outl(value, pr->throttling.address); + } + + pr->throttling.state = state; + + local_irq_enable(); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Throttling state set to T%d (%d%%)\n", state, + (pr->throttling.states[state].performance ? pr-> + throttling.states[state].performance / 10 : 0))); + + return_VALUE(0); +} + +int acpi_processor_get_throttling_info(struct acpi_processor *pr) +{ + int result = 0; + int step = 0; + int i = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_get_throttling_info"); + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n", + pr->throttling.address, + pr->throttling.duty_offset, + pr->throttling.duty_width)); + + if (!pr) + return_VALUE(-EINVAL); + + /* TBD: Support ACPI 2.0 objects */ + + if (!pr->throttling.address) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n")); + return_VALUE(0); + } else if (!pr->throttling.duty_width) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n")); + return_VALUE(0); + } + /* TBD: Support duty_cycle values that span bit 4. */ + else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "duty_cycle spans bit 4\n")); + return_VALUE(0); + } + + /* + * PIIX4 Errata: We don't support throttling on the original PIIX4. + * This shouldn't be an issue as few (if any) mobile systems ever + * used this part. + */ + if (errata.piix4.throttle) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Throttling not supported on PIIX4 A- or B-step\n")); + return_VALUE(0); + } + + pr->throttling.state_count = 1 << acpi_fadt.duty_width; + + /* + * Compute state values. Note that throttling displays a linear power/ + * performance relationship (at 50% performance the CPU will consume + * 50% power). Values are in 1/10th of a percent to preserve accuracy. + */ + + step = (1000 / pr->throttling.state_count); + + for (i = 0; i < pr->throttling.state_count; i++) { + pr->throttling.states[i].performance = step * i; + pr->throttling.states[i].power = step * i; + } + + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n", + pr->throttling.state_count)); + + pr->flags.throttling = 1; + + /* + * Disable throttling (if enabled). We'll let subsequent policy (e.g. + * thermal) decide to lower performance if it so chooses, but for now + * we'll crank up the speed. 
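Spelled out for the common case, a standalone sketch of the table this loop builds (assuming acpi_fadt.duty_width == 3, which yields eight T-states):

        #include <stdio.h>

        int main(void)
        {
                int state_count = 1 << 3;       /* duty_width = 3 */
                int step = 1000 / state_count;  /* 125 tenths of a percent */
                int i;

                /* T0 throttles nothing; every further state adds 12.5%. */
                for (i = 0; i < state_count; i++)
                        printf(" T%d: %02d%% throttled\n", i, (step * i) / 10);
                return 0;
        }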
+ */ + + result = acpi_processor_get_throttling(pr); + if (result) + goto end; + + if (pr->throttling.state) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Disabling throttling (was T%d)\n", + pr->throttling.state)); + result = acpi_processor_set_throttling(pr, 0); + if (result) + goto end; + } + + end: + if (result) + pr->flags.throttling = 0; + + return_VALUE(result); +} + +/* proc interface */ + +static int acpi_processor_throttling_seq_show(struct seq_file *seq, + void *offset) +{ + struct acpi_processor *pr = (struct acpi_processor *)seq->private; + int i = 0; + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_processor_throttling_seq_show"); + + if (!pr) + goto end; + + if (!(pr->throttling.state_count > 0)) { + seq_puts(seq, "\n"); + goto end; + } + + result = acpi_processor_get_throttling(pr); + + if (result) { + seq_puts(seq, + "Could not determine current throttling state.\n"); + goto end; + } + + seq_printf(seq, "state count: %d\n" + "active state: T%d\n", + pr->throttling.state_count, pr->throttling.state); + + seq_puts(seq, "states:\n"); + for (i = 0; i < pr->throttling.state_count; i++) + seq_printf(seq, " %cT%d: %02d%%\n", + (i == pr->throttling.state ? '*' : ' '), i, + (pr->throttling.states[i].performance ? pr-> + throttling.states[i].performance / 10 : 0)); + + end: + return_VALUE(0); +} + +static int acpi_processor_throttling_open_fs(struct inode *inode, + struct file *file) +{ + return single_open(file, acpi_processor_throttling_seq_show, + PDE(inode)->data); +} + +ssize_t acpi_processor_write_throttling(struct file * file, + const char __user * buffer, + size_t count, loff_t * data) +{ + int result = 0; + struct seq_file *m = (struct seq_file *)file->private_data; + struct acpi_processor *pr = (struct acpi_processor *)m->private; + char state_string[12] = { '\0' }; + + ACPI_FUNCTION_TRACE("acpi_processor_write_throttling"); + + if (!pr || (count > sizeof(state_string) - 1)) + return_VALUE(-EINVAL); + + if (copy_from_user(state_string, buffer, count)) + return_VALUE(-EFAULT); + + state_string[count] = '\0'; + + result = acpi_processor_set_throttling(pr, + simple_strtoul(state_string, + NULL, 0)); + if (result) + return_VALUE(result); + + return_VALUE(count); +} + +struct file_operations acpi_processor_throttling_fops = { + .open = acpi_processor_throttling_open_fs, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c deleted file mode 100644 index 0c561c5..0000000 --- a/drivers/acpi/processor_core.c +++ /dev/null @@ -1,986 +0,0 @@ -/* - * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $) - * - * Copyright (C) 2001, 2002 Andy Grover - * Copyright (C) 2001, 2002 Paul Diefenbaugh - * Copyright (C) 2004 Dominik Brodowski - * Copyright (C) 2004 Anil S Keshavamurthy - * - Added processor hotplug support - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * TBD: - * 1. Make # power states dynamic. - * 2. Support duty_cycle values that span bit 4. - * 3. Optimize by having scheduler determine business instead of - * having us try to calculate it here. - * 4. Need C1 timing -- must modify kernel (IRQ handler) to get this. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define ACPI_PROCESSOR_COMPONENT 0x01000000 -#define ACPI_PROCESSOR_CLASS "processor" -#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" -#define ACPI_PROCESSOR_DEVICE_NAME "Processor" -#define ACPI_PROCESSOR_FILE_INFO "info" -#define ACPI_PROCESSOR_FILE_THROTTLING "throttling" -#define ACPI_PROCESSOR_FILE_LIMIT "limit" -#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80 -#define ACPI_PROCESSOR_NOTIFY_POWER 0x81 - -#define ACPI_PROCESSOR_LIMIT_USER 0 -#define ACPI_PROCESSOR_LIMIT_THERMAL 1 - -#define ACPI_STA_PRESENT 0x00000001 - -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("acpi_processor") - - MODULE_AUTHOR("Paul Diefenbaugh"); -MODULE_DESCRIPTION(ACPI_PROCESSOR_DRIVER_NAME); -MODULE_LICENSE("GPL"); - -static int acpi_processor_add(struct acpi_device *device); -static int acpi_processor_start(struct acpi_device *device); -static int acpi_processor_remove(struct acpi_device *device, int type); -static int acpi_processor_info_open_fs(struct inode *inode, struct file *file); -static void acpi_processor_notify(acpi_handle handle, u32 event, void *data); -static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu); -static int acpi_processor_handle_eject(struct acpi_processor *pr); - -static struct acpi_driver acpi_processor_driver = { - .name = ACPI_PROCESSOR_DRIVER_NAME, - .class = ACPI_PROCESSOR_CLASS, - .ids = ACPI_PROCESSOR_HID, - .ops = { - .add = acpi_processor_add, - .remove = acpi_processor_remove, - .start = acpi_processor_start, - }, -}; - -#define INSTALL_NOTIFY_HANDLER 1 -#define UNINSTALL_NOTIFY_HANDLER 2 - -static struct file_operations acpi_processor_info_fops = { - .open = acpi_processor_info_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -struct acpi_processor *processors[NR_CPUS]; -struct acpi_processor_errata errata; - -/* -------------------------------------------------------------------------- - Errata Handling - -------------------------------------------------------------------------- */ - -static int acpi_processor_errata_piix4(struct pci_dev *dev) -{ - u8 rev = 0; - u8 value1 = 0; - u8 value2 = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_errata_piix4"); - - if (!dev) - return_VALUE(-EINVAL); - - /* - * Note that 'dev' references the PIIX4 ACPI Controller. 
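(In short, the flags set here are consumed in three places: piix4.throttle makes acpi_processor_get_throttling_info() refuse to enable T-states, piix4.bmisx makes the idle loop poll the BMIDE status ports directly because BM_STS alone is unreliable on this part, and piix4.fdma keeps C3 disabled so Type-F DMA devices keep working.)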
- */ - - pci_read_config_byte(dev, PCI_REVISION_ID, &rev); - - switch (rev) { - case 0: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n")); - break; - case 1: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n")); - break; - case 2: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n")); - break; - case 3: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n")); - break; - default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n")); - break; - } - - switch (rev) { - - case 0: /* PIIX4 A-step */ - case 1: /* PIIX4 B-step */ - /* - * See specification changes #13 ("Manual Throttle Duty Cycle") - * and #14 ("Enabling and Disabling Manual Throttle"), plus - * erratum #5 ("STPCLK# Deassertion Time") from the January - * 2002 PIIX4 specification update. Applies to only older - * PIIX4 models. - */ - errata.piix4.throttle = 1; - - case 2: /* PIIX4E */ - case 3: /* PIIX4M */ - /* - * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA - * Livelock") from the January 2002 PIIX4 specification update. - * Applies to all PIIX4 models. - */ - - /* - * BM-IDE - * ------ - * Find the PIIX4 IDE Controller and get the Bus Master IDE - * Status register address. We'll use this later to read - * each IDE controller's DMA status to make sure we catch all - * DMA activity. - */ - dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82371AB, - PCI_ANY_ID, PCI_ANY_ID, NULL); - if (dev) { - errata.piix4.bmisx = pci_resource_start(dev, 4); - pci_dev_put(dev); - } - - /* - * Type-F DMA - * ---------- - * Find the PIIX4 ISA Controller and read the Motherboard - * DMA controller's status to see if Type-F (Fast) DMA mode - * is enabled (bit 7) on either channel. Note that we'll - * disable C3 support if this is enabled, as some legacy - * devices won't operate well if fast DMA is disabled. - */ - dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82371AB_0, - PCI_ANY_ID, PCI_ANY_ID, NULL); - if (dev) { - pci_read_config_byte(dev, 0x76, &value1); - pci_read_config_byte(dev, 0x77, &value2); - if ((value1 & 0x80) || (value2 & 0x80)) - errata.piix4.fdma = 1; - pci_dev_put(dev); - } - - break; - } - - if (errata.piix4.bmisx) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Bus master activity detection (BM-IDE) erratum enabled\n")); - if (errata.piix4.fdma) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Type-F DMA livelock erratum (C3 disabled)\n")); - - return_VALUE(0); -} - -static int acpi_processor_errata(struct acpi_processor *pr) -{ - int result = 0; - struct pci_dev *dev = NULL; - - ACPI_FUNCTION_TRACE("acpi_processor_errata"); - - if (!pr) - return_VALUE(-EINVAL); - - /* - * PIIX4 - */ - dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, - PCI_ANY_ID, NULL); - if (dev) { - result = acpi_processor_errata_piix4(dev); - pci_dev_put(dev); - } - - return_VALUE(result); -} - -/* -------------------------------------------------------------------------- - Common ACPI processor fucntions - -------------------------------------------------------------------------- */ - -/* - * _PDC is required for a BIOS-OS handshake for most of the newer - * ACPI processor features. 
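A standalone sketch of the 12-byte argument this handshake passes, with the field layout as used below (revision, capability-DWORD count, capability bits; the concrete revision value is defined elsewhere and is only a stand-in here):

        #include <stdio.h>

        int main(void)
        {
                /* Three DWORDs, mirroring arg0_buf[] in acpi_processor_set_pdc(). */
                unsigned int pdc_buf[3] = {
                        1,      /* stand-in for ACPI_PDC_REVISION_ID */
                        0,      /* number of capability DWORDs */
                        0       /* capability bits */
                };

                printf("buffer length %u bytes, revision %u\n",
                       (unsigned int)sizeof(pdc_buf), pdc_buf[0]);
                return 0;
        }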
- */ - -int acpi_processor_set_pdc(struct acpi_processor *pr, - struct acpi_object_list *pdc_in) -{ - acpi_status status = AE_OK; - u32 arg0_buf[3]; - union acpi_object arg0 = { ACPI_TYPE_BUFFER }; - struct acpi_object_list no_object = { 1, &arg0 }; - struct acpi_object_list *pdc; - - ACPI_FUNCTION_TRACE("acpi_processor_set_pdc"); - - arg0.buffer.length = 12; - arg0.buffer.pointer = (u8 *) arg0_buf; - arg0_buf[0] = ACPI_PDC_REVISION_ID; - arg0_buf[1] = 0; - arg0_buf[2] = 0; - - pdc = (pdc_in) ? pdc_in : &no_object; - - status = acpi_evaluate_object(pr->handle, "_PDC", pdc, NULL); - - if ((ACPI_FAILURE(status)) && (pdc_in)) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Error evaluating _PDC, using legacy perf. control...\n")); - - return_VALUE(status); -} - -/* -------------------------------------------------------------------------- - FS Interface (/proc) - -------------------------------------------------------------------------- */ - -static struct proc_dir_entry *acpi_processor_dir = NULL; - -static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset) -{ - struct acpi_processor *pr = (struct acpi_processor *)seq->private; - - ACPI_FUNCTION_TRACE("acpi_processor_info_seq_show"); - - if (!pr) - goto end; - - seq_printf(seq, "processor id: %d\n" - "acpi id: %d\n" - "bus mastering control: %s\n" - "power management: %s\n" - "throttling control: %s\n" - "limit interface: %s\n", - pr->id, - pr->acpi_id, - pr->flags.bm_control ? "yes" : "no", - pr->flags.power ? "yes" : "no", - pr->flags.throttling ? "yes" : "no", - pr->flags.limit ? "yes" : "no"); - - end: - return_VALUE(0); -} - -static int acpi_processor_info_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_processor_info_seq_show, - PDE(inode)->data); -} - -static int acpi_processor_add_fs(struct acpi_device *device) -{ - struct proc_dir_entry *entry = NULL; - - ACPI_FUNCTION_TRACE("acpi_processor_add_fs"); - - if (!acpi_device_dir(device)) { - acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), - acpi_processor_dir); - if (!acpi_device_dir(device)) - return_VALUE(-ENODEV); - } - acpi_device_dir(device)->owner = THIS_MODULE; - - /* 'info' [R] */ - entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO, - S_IRUGO, acpi_device_dir(device)); - if (!entry) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Unable to create '%s' fs entry\n", - ACPI_PROCESSOR_FILE_INFO)); - else { - entry->proc_fops = &acpi_processor_info_fops; - entry->data = acpi_driver_data(device); - entry->owner = THIS_MODULE; - } - - /* 'throttling' [R/W] */ - entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, - S_IFREG | S_IRUGO | S_IWUSR, - acpi_device_dir(device)); - if (!entry) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Unable to create '%s' fs entry\n", - ACPI_PROCESSOR_FILE_THROTTLING)); - else { - entry->proc_fops = &acpi_processor_throttling_fops; - entry->proc_fops->write = acpi_processor_write_throttling; - entry->data = acpi_driver_data(device); - entry->owner = THIS_MODULE; - } - - /* 'limit' [R/W] */ - entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, - S_IFREG | S_IRUGO | S_IWUSR, - acpi_device_dir(device)); - if (!entry) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Unable to create '%s' fs entry\n", - ACPI_PROCESSOR_FILE_LIMIT)); - else { - entry->proc_fops = &acpi_processor_limit_fops; - entry->proc_fops->write = acpi_processor_write_limit; - entry->data = acpi_driver_data(device); - entry->owner = THIS_MODULE; - } - - return_VALUE(0); -} - -static int acpi_processor_remove_fs(struct acpi_device *device) -{ - 
ACPI_FUNCTION_TRACE("acpi_processor_remove_fs"); - - if (acpi_device_dir(device)) { - remove_proc_entry(ACPI_PROCESSOR_FILE_INFO, - acpi_device_dir(device)); - remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, - acpi_device_dir(device)); - remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, - acpi_device_dir(device)); - remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); - acpi_device_dir(device) = NULL; - } - - return_VALUE(0); -} - -/* Use the acpiid in MADT to map cpus in case of SMP */ -#ifndef CONFIG_SMP -#define convert_acpiid_to_cpu(acpi_id) (0xff) -#else - -#ifdef CONFIG_IA64 -#define arch_acpiid_to_apicid ia64_acpiid_to_sapicid -#define arch_cpu_to_apicid ia64_cpu_to_sapicid -#define ARCH_BAD_APICID (0xffff) -#else -#define arch_acpiid_to_apicid x86_acpiid_to_apicid -#define arch_cpu_to_apicid x86_cpu_to_apicid -#define ARCH_BAD_APICID (0xff) -#endif - -static u8 convert_acpiid_to_cpu(u8 acpi_id) -{ - u16 apic_id; - int i; - - apic_id = arch_acpiid_to_apicid[acpi_id]; - if (apic_id == ARCH_BAD_APICID) - return -1; - - for (i = 0; i < NR_CPUS; i++) { - if (arch_cpu_to_apicid[i] == apic_id) - return i; - } - return -1; -} -#endif - -/* -------------------------------------------------------------------------- - Driver Interface - -------------------------------------------------------------------------- */ - -static int acpi_processor_get_info(struct acpi_processor *pr) -{ - acpi_status status = 0; - union acpi_object object = { 0 }; - struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; - u8 cpu_index; - static int cpu0_initialized; - - ACPI_FUNCTION_TRACE("acpi_processor_get_info"); - - if (!pr) - return_VALUE(-EINVAL); - - if (num_online_cpus() > 1) - errata.smp = TRUE; - - acpi_processor_errata(pr); - - /* - * Check to see if we have bus mastering arbitration control. This - * is required for proper C3 usage (to maintain cache coherency). - */ - if (acpi_fadt.V1_pm2_cnt_blk && acpi_fadt.pm2_cnt_len) { - pr->flags.bm_control = 1; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Bus mastering arbitration control present\n")); - } else - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "No bus mastering arbitration control\n")); - - /* - * Evalute the processor object. Note that it is common on SMP to - * have the first (boot) processor with a valid PBLK address while - * all others have a NULL address. - */ - status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error evaluating processor object\n")); - return_VALUE(-ENODEV); - } - - /* - * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. - * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c - */ - pr->acpi_id = object.processor.proc_id; - - cpu_index = convert_acpiid_to_cpu(pr->acpi_id); - - /* Handle UP system running SMP kernel, with no LAPIC in MADT */ - if (!cpu0_initialized && (cpu_index == 0xff) && - (num_online_cpus() == 1)) { - cpu_index = 0; - } - - cpu0_initialized = 1; - - pr->id = cpu_index; - - /* - * Extra Processor objects may be enumerated on MP systems with - * less than the max # of CPUs. They should be ignored _iff - * they are physically not present. 
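The presence test relied on here checks bit 0 of _STA, matching the ACPI_STA_PRESENT mask defined at the top of this file; a minimal standalone sketch with an example status value:

        #include <stdio.h>

        #define ACPI_STA_PRESENT 0x00000001

        int main(void)
        {
                unsigned long sta = 0x0f;       /* example _STA result */

                printf("processor is %s\n",
                       (sta & ACPI_STA_PRESENT) ? "present" : "physically absent");
                return 0;
        }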
- */ - if (cpu_index >= NR_CPUS) { - if (ACPI_FAILURE - (acpi_processor_hotadd_init(pr->handle, &pr->id))) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error getting cpuindex for acpiid 0x%x\n", - pr->acpi_id)); - return_VALUE(-ENODEV); - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, - pr->acpi_id)); - - if (!object.processor.pblk_address) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n")); - else if (object.processor.pblk_length != 6) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid PBLK length [%d]\n", - object.processor.pblk_length)); - else { - pr->throttling.address = object.processor.pblk_address; - pr->throttling.duty_offset = acpi_fadt.duty_offset; - pr->throttling.duty_width = acpi_fadt.duty_width; - - pr->pblk = object.processor.pblk_address; - - /* - * We don't care about error returns - we just try to mark - * these reserved so that nobody else is confused into thinking - * that this region might be unused.. - * - * (In particular, allocating the IO range for Cardbus) - */ - request_region(pr->throttling.address, 6, "ACPI CPU throttle"); - } - -#ifdef CONFIG_CPU_FREQ - acpi_processor_ppc_has_changed(pr); -#endif - acpi_processor_get_throttling_info(pr); - acpi_processor_get_limit_info(pr); - - return_VALUE(0); -} - -static void *processor_device_array[NR_CPUS]; - -static int acpi_processor_start(struct acpi_device *device) -{ - int result = 0; - acpi_status status = AE_OK; - struct acpi_processor *pr; - - ACPI_FUNCTION_TRACE("acpi_processor_start"); - - pr = acpi_driver_data(device); - - result = acpi_processor_get_info(pr); - if (result) { - /* Processor is physically not present */ - return_VALUE(0); - } - - BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0)); - - /* - * Buggy BIOS check - * ACPI id of processors can be reported wrongly by the BIOS. 
- * Don't trust it blindly - */ - if (processor_device_array[pr->id] != NULL && - processor_device_array[pr->id] != (void *)device) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "BIOS reporting wrong ACPI id" - "for the processor\n")); - return_VALUE(-ENODEV); - } - processor_device_array[pr->id] = (void *)device; - - processors[pr->id] = pr; - - result = acpi_processor_add_fs(device); - if (result) - goto end; - - status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, - acpi_processor_notify, pr); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error installing device notify handler\n")); - } - - acpi_processor_power_init(pr, device); - - if (pr->flags.throttling) { - printk(KERN_INFO PREFIX "%s [%s] (supports", - acpi_device_name(device), acpi_device_bid(device)); - printk(" %d throttling states", pr->throttling.state_count); - printk(")\n"); - } - - end: - - return_VALUE(result); -} - -static void acpi_processor_notify(acpi_handle handle, u32 event, void *data) -{ - struct acpi_processor *pr = (struct acpi_processor *)data; - struct acpi_device *device = NULL; - - ACPI_FUNCTION_TRACE("acpi_processor_notify"); - - if (!pr) - return_VOID; - - if (acpi_bus_get_device(pr->handle, &device)) - return_VOID; - - switch (event) { - case ACPI_PROCESSOR_NOTIFY_PERFORMANCE: - acpi_processor_ppc_has_changed(pr); - acpi_bus_generate_event(device, event, - pr->performance_platform_limit); - break; - case ACPI_PROCESSOR_NOTIFY_POWER: - acpi_processor_cst_has_changed(pr); - acpi_bus_generate_event(device, event, 0); - break; - default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Unsupported event [0x%x]\n", event)); - break; - } - - return_VOID; -} - -static int acpi_processor_add(struct acpi_device *device) -{ - struct acpi_processor *pr = NULL; - - ACPI_FUNCTION_TRACE("acpi_processor_add"); - - if (!device) - return_VALUE(-EINVAL); - - pr = kmalloc(sizeof(struct acpi_processor), GFP_KERNEL); - if (!pr) - return_VALUE(-ENOMEM); - memset(pr, 0, sizeof(struct acpi_processor)); - - pr->handle = device->handle; - strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); - acpi_driver_data(device) = pr; - - return_VALUE(0); -} - -static int acpi_processor_remove(struct acpi_device *device, int type) -{ - acpi_status status = AE_OK; - struct acpi_processor *pr = NULL; - - ACPI_FUNCTION_TRACE("acpi_processor_remove"); - - if (!device || !acpi_driver_data(device)) - return_VALUE(-EINVAL); - - pr = (struct acpi_processor *)acpi_driver_data(device); - - if (pr->id >= NR_CPUS) { - kfree(pr); - return_VALUE(0); - } - - if (type == ACPI_BUS_REMOVAL_EJECT) { - if (acpi_processor_handle_eject(pr)) - return_VALUE(-EINVAL); - } - - acpi_processor_power_exit(pr, device); - - status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY, - acpi_processor_notify); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error removing notify handler\n")); - } - - acpi_processor_remove_fs(device); - - processors[pr->id] = NULL; - - kfree(pr); - - return_VALUE(0); -} - -#ifdef CONFIG_ACPI_HOTPLUG_CPU -/**************************************************************************** - * Acpi processor hotplug support * - ****************************************************************************/ - -static int is_processor_present(acpi_handle handle); - -static int is_processor_present(acpi_handle handle) -{ - acpi_status status; - unsigned long sta = 0; - - ACPI_FUNCTION_TRACE("is_processor_present"); - - status = 
acpi_evaluate_integer(handle, "_STA", NULL, &sta); - if (ACPI_FAILURE(status) || !(sta & ACPI_STA_PRESENT)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Processor Device is not present\n")); - return_VALUE(0); - } - return_VALUE(1); -} - -static -int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device) -{ - acpi_handle phandle; - struct acpi_device *pdev; - struct acpi_processor *pr; - - ACPI_FUNCTION_TRACE("acpi_processor_device_add"); - - if (acpi_get_parent(handle, &phandle)) { - return_VALUE(-ENODEV); - } - - if (acpi_bus_get_device(phandle, &pdev)) { - return_VALUE(-ENODEV); - } - - if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) { - return_VALUE(-ENODEV); - } - - acpi_bus_start(*device); - - pr = acpi_driver_data(*device); - if (!pr) - return_VALUE(-ENODEV); - - if ((pr->id >= 0) && (pr->id < NR_CPUS)) { - kobject_hotplug(&(*device)->kobj, KOBJ_ONLINE); - } - return_VALUE(0); -} - -static void -acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data) -{ - struct acpi_processor *pr; - struct acpi_device *device = NULL; - int result; - - ACPI_FUNCTION_TRACE("acpi_processor_hotplug_notify"); - - switch (event) { - case ACPI_NOTIFY_BUS_CHECK: - case ACPI_NOTIFY_DEVICE_CHECK: - printk("Processor driver received %s event\n", - (event == ACPI_NOTIFY_BUS_CHECK) ? - "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); - - if (!is_processor_present(handle)) - break; - - if (acpi_bus_get_device(handle, &device)) { - result = acpi_processor_device_add(handle, &device); - if (result) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Unable to add the device\n")); - break; - } - - pr = acpi_driver_data(device); - if (!pr) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Driver data is NULL\n")); - break; - } - - if (pr->id >= 0 && (pr->id < NR_CPUS)) { - kobject_hotplug(&device->kobj, KOBJ_OFFLINE); - break; - } - - result = acpi_processor_start(device); - if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) { - kobject_hotplug(&device->kobj, KOBJ_ONLINE); - } else { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Device [%s] failed to start\n", - acpi_device_bid(device))); - } - break; - case ACPI_NOTIFY_EJECT_REQUEST: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "received ACPI_NOTIFY_EJECT_REQUEST\n")); - - if (acpi_bus_get_device(handle, &device)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Device don't exist, dropping EJECT\n")); - break; - } - pr = acpi_driver_data(device); - if (!pr) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Driver data is NULL, dropping EJECT\n")); - return_VOID; - } - - if ((pr->id < NR_CPUS) && (cpu_present(pr->id))) - kobject_hotplug(&device->kobj, KOBJ_OFFLINE); - break; - default: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Unsupported event [0x%x]\n", event)); - break; - } - - return_VOID; -} - -static acpi_status -processor_walk_namespace_cb(acpi_handle handle, - u32 lvl, void *context, void **rv) -{ - acpi_status status; - int *action = context; - acpi_object_type type = 0; - - status = acpi_get_type(handle, &type); - if (ACPI_FAILURE(status)) - return (AE_OK); - - if (type != ACPI_TYPE_PROCESSOR) - return (AE_OK); - - switch (*action) { - case INSTALL_NOTIFY_HANDLER: - acpi_install_notify_handler(handle, - ACPI_SYSTEM_NOTIFY, - acpi_processor_hotplug_notify, - NULL); - break; - case UNINSTALL_NOTIFY_HANDLER: - acpi_remove_notify_handler(handle, - ACPI_SYSTEM_NOTIFY, - acpi_processor_hotplug_notify); - break; - default: - break; - } - - return (AE_OK); -} - -static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) -{ - 
ACPI_FUNCTION_TRACE("acpi_processor_hotadd_init"); - - if (!is_processor_present(handle)) { - return_VALUE(AE_ERROR); - } - - if (acpi_map_lsapic(handle, p_cpu)) - return_VALUE(AE_ERROR); - - if (arch_register_cpu(*p_cpu)) { - acpi_unmap_lsapic(*p_cpu); - return_VALUE(AE_ERROR); - } - - return_VALUE(AE_OK); -} - -static int acpi_processor_handle_eject(struct acpi_processor *pr) -{ - if (cpu_online(pr->id)) { - return (-EINVAL); - } - arch_unregister_cpu(pr->id); - acpi_unmap_lsapic(pr->id); - return (0); -} -#else -static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) -{ - return AE_ERROR; -} -static int acpi_processor_handle_eject(struct acpi_processor *pr) -{ - return (-EINVAL); -} -#endif - -static -void acpi_processor_install_hotplug_notify(void) -{ -#ifdef CONFIG_ACPI_HOTPLUG_CPU - int action = INSTALL_NOTIFY_HANDLER; - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, - ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, - processor_walk_namespace_cb, &action, NULL); -#endif -} - -static -void acpi_processor_uninstall_hotplug_notify(void) -{ -#ifdef CONFIG_ACPI_HOTPLUG_CPU - int action = UNINSTALL_NOTIFY_HANDLER; - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, - ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, - processor_walk_namespace_cb, &action, NULL); -#endif -} - -/* - * We keep the driver loaded even when ACPI is not running. - * This is needed for the powernow-k8 driver, that works even without - * ACPI, but needs symbols from this driver - */ - -static int __init acpi_processor_init(void) -{ - int result = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_init"); - - memset(&processors, 0, sizeof(processors)); - memset(&errata, 0, sizeof(errata)); - - acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); - if (!acpi_processor_dir) - return_VALUE(0); - acpi_processor_dir->owner = THIS_MODULE; - - result = acpi_bus_register_driver(&acpi_processor_driver); - if (result < 0) { - remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); - return_VALUE(0); - } - - acpi_processor_install_hotplug_notify(); - - acpi_thermal_cpufreq_init(); - - acpi_processor_ppc_init(); - - return_VALUE(0); -} - -static void __exit acpi_processor_exit(void) -{ - ACPI_FUNCTION_TRACE("acpi_processor_exit"); - - acpi_processor_ppc_exit(); - - acpi_thermal_cpufreq_exit(); - - acpi_processor_uninstall_hotplug_notify(); - - acpi_bus_unregister_driver(&acpi_processor_driver); - - remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); - - return_VOID; -} - -module_init(acpi_processor_init); -module_exit(acpi_processor_exit); - -EXPORT_SYMBOL(acpi_processor_set_thermal_limit); - -MODULE_ALIAS("processor"); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c deleted file mode 100644 index 807b0df..0000000 --- a/drivers/acpi/processor_idle.c +++ /dev/null @@ -1,1119 +0,0 @@ -/* - * processor_idle - idle state submodule to the ACPI processor driver - * - * Copyright (C) 2001, 2002 Andy Grover - * Copyright (C) 2001, 2002 Paul Diefenbaugh - * Copyright (C) 2004 Dominik Brodowski - * Copyright (C) 2004 Anil S Keshavamurthy - * - Added processor hotplug support - * Copyright (C) 2005 Venkatesh Pallipadi - * - Added support for C3 on SMP - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/acpi.h> -#include <linux/dmi.h> -#include <linux/moduleparam.h> -#include <linux/sched.h> /* need_resched() */ - -#include <asm/io.h> -#include <asm/uaccess.h> - -#include <acpi/acpi_bus.h> -#include <acpi/processor.h> - -#define ACPI_PROCESSOR_COMPONENT 0x01000000 -#define ACPI_PROCESSOR_CLASS "processor" -#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("acpi_processor") -#define ACPI_PROCESSOR_FILE_POWER "power" -#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) -#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ -#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ -static void (*pm_idle_save) (void); -module_param(max_cstate, uint, 0644); - -static unsigned int nocst = 0; -module_param(nocst, uint, 0000); - -/* - * bm_history -- bit-mask with a bit per jiffy of bus-master activity - * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms - * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms - * 100 HZ: 0x0000000F: 4 jiffies = 40ms - * reduce history for more aggressive entry into C3 - */ -static unsigned int bm_history = - (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); -module_param(bm_history, uint, 0644); -/* -------------------------------------------------------------------------- - Power Management - -------------------------------------------------------------------------- */ - -/* - * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. - * For now disable this. Probably a bug somewhere else. - * - * To skip this limit, boot/load with a large max_cstate limit. - */ -static int set_max_cstate(struct dmi_system_id *id) -{ - if (max_cstate > ACPI_PROCESSOR_MAX_POWER) - return 0; - - printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate." - " Override with \"processor.max_cstate=%d\"\n", id->ident, - (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); - - max_cstate = (long)id->driver_data; - - return 0; -} - -static struct dmi_system_id __initdata processor_power_dmi_table[] = { - {set_max_cstate, "IBM ThinkPad R40e", { - DMI_MATCH(DMI_BIOS_VENDOR, - "IBM"), - DMI_MATCH(DMI_BIOS_VERSION, - "1SET60WW")}, - (void *)1}, - {set_max_cstate, "Medion 41700", { - DMI_MATCH(DMI_BIOS_VENDOR, - "Phoenix Technologies LTD"), - DMI_MATCH(DMI_BIOS_VERSION, - "R01-A1J")}, (void *)1}, - {set_max_cstate, "Clevo 5600D", { - DMI_MATCH(DMI_BIOS_VENDOR, - "Phoenix Technologies LTD"), - DMI_MATCH(DMI_BIOS_VERSION, - "SHE845M0.86C.0013.D.0302131307")}, - (void *)2}, - {}, -}; - -static inline u32 ticks_elapsed(u32 t1, u32 t2) -{ - if (t2 >= t1) - return (t2 - t1); - else if (!acpi_fadt.tmr_val_ext) - return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); - else - return ((0xFFFFFFFF - t1) + t2); -} - -static void -acpi_processor_power_activate(struct acpi_processor *pr, - struct acpi_processor_cx *new) -{ - struct acpi_processor_cx *old; - - if (!pr || !new) - return; - - old = pr->power.state; - - if (old) - old->promotion.count = 0; - new->demotion.count = 0; - - /* Cleanup from old state. 
*/ - if (old) { - switch (old->type) { - case ACPI_STATE_C3: - /* Disable bus master reload */ - if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, - ACPI_MTX_DO_NOT_LOCK); - break; - } - } - - /* Prepare to use new state. */ - switch (new->type) { - case ACPI_STATE_C3: - /* Enable bus master reload */ - if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, - ACPI_MTX_DO_NOT_LOCK); - break; - } - - pr->power.state = new; - - return; -} - -static void acpi_safe_halt(void) -{ - clear_thread_flag(TIF_POLLING_NRFLAG); - smp_mb__after_clear_bit(); - if (!need_resched()) - safe_halt(); - set_thread_flag(TIF_POLLING_NRFLAG); -} - -static atomic_t c3_cpu_count; - -static void acpi_processor_idle(void) -{ - struct acpi_processor *pr = NULL; - struct acpi_processor_cx *cx = NULL; - struct acpi_processor_cx *next_state = NULL; - int sleep_ticks = 0; - u32 t1, t2 = 0; - - pr = processors[smp_processor_id()]; - if (!pr) - return; - - /* - * Interrupts must be disabled during bus mastering calculations and - * for C2/C3 transitions. - */ - local_irq_disable(); - - /* - * Check whether we truly need to go idle, or should - * reschedule: - */ - if (unlikely(need_resched())) { - local_irq_enable(); - return; - } - - cx = pr->power.state; - if (!cx) { - if (pm_idle_save) - pm_idle_save(); - else - acpi_safe_halt(); - return; - } - - /* - * Check BM Activity - * ----------------- - * Check for bus mastering activity (if required), record, and check - * for demotion. - */ - if (pr->flags.bm_check) { - u32 bm_status = 0; - unsigned long diff = jiffies - pr->power.bm_check_timestamp; - - if (diff > 32) - diff = 32; - - while (diff) { - /* if we didn't get called, assume there was busmaster activity */ - diff--; - if (diff) - pr->power.bm_activity |= 0x1; - pr->power.bm_activity <<= 1; - } - - acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, - &bm_status, ACPI_MTX_DO_NOT_LOCK); - if (bm_status) { - pr->power.bm_activity++; - acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, - 1, ACPI_MTX_DO_NOT_LOCK); - } - /* - * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect - * the true state of bus mastering activity; forcing us to - * manually check the BMIDEA bit of each IDE channel. - */ - else if (errata.piix4.bmisx) { - if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) - || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) - pr->power.bm_activity++; - } - - pr->power.bm_check_timestamp = jiffies; - - /* - * Apply bus mastering demotion policy. Automatically demote - * to avoid a faulty transition. Note that the processor - * won't enter a low-power state during this call (to this - * function) but should upon the next. - * - * TBD: A better policy might be to fallback to the demotion - * state (use it for this quantum only) instead of - * demoting -- and rely on duration as our sole demotion - * qualification. This may, however, introduce DMA - * issues (e.g. floppy DMA transfer overrun/underrun). - */ - if (pr->power.bm_activity & cx->demotion.threshold.bm) { - local_irq_enable(); - next_state = cx->demotion.state; - goto end; - } - } - -#ifdef CONFIG_HOTPLUG_CPU - /* - * Check for P_LVL2_UP flag before entering C2 and above on - * an SMP system. We do it here instead of doing it at _CST/P_LVL - * detection phase, to work cleanly with logical CPU hotplug. 
- */ - if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && - !pr->flags.has_cst && !acpi_fadt.plvl2_up) - cx = &pr->power.states[ACPI_STATE_C1]; -#endif - - cx->usage++; - - /* - * Sleep: - * ------ - * Invoke the current Cx state to put the processor to sleep. - */ - if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { - clear_thread_flag(TIF_POLLING_NRFLAG); - smp_mb__after_clear_bit(); - if (need_resched()) { - set_thread_flag(TIF_POLLING_NRFLAG); - local_irq_enable(); - return; - } - } - - switch (cx->type) { - - case ACPI_STATE_C1: - /* - * Invoke C1. - * Use the appropriate idle routine, the one that would - * be used without acpi C-states. - */ - if (pm_idle_save) - pm_idle_save(); - else - acpi_safe_halt(); - - /* - * TBD: Can't get time duration while in C1, as resumes - * go to an ISR rather than here. Need to instrument - * base interrupt handler. - */ - sleep_ticks = 0xFFFFFFFF; - break; - - case ACPI_STATE_C2: - /* Get start time (ticks) */ - t1 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Invoke C2 */ - inb(cx->address); - /* Dummy op - must do something useless after P_LVL2 read */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Get end time (ticks) */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Re-enable interrupts */ - local_irq_enable(); - set_thread_flag(TIF_POLLING_NRFLAG); - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = - ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; - break; - - case ACPI_STATE_C3: - - if (pr->flags.bm_check) { - if (atomic_inc_return(&c3_cpu_count) == - num_online_cpus()) { - /* - * All CPUs are trying to go to C3 - * Disable bus master arbitration - */ - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, - ACPI_MTX_DO_NOT_LOCK); - } - } else { - /* SMP with no shared cache... Invalidate cache */ - ACPI_FLUSH_CPU_CACHE(); - } - - /* Get start time (ticks) */ - t1 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Invoke C3 */ - inb(cx->address); - /* Dummy op - must do something useless after P_LVL3 read */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); - /* Get end time (ticks) */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); - if (pr->flags.bm_check) { - /* Enable bus master arbitration */ - atomic_dec(&c3_cpu_count); - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, - ACPI_MTX_DO_NOT_LOCK); - } - - /* Re-enable interrupts */ - local_irq_enable(); - set_thread_flag(TIF_POLLING_NRFLAG); - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = - ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; - break; - - default: - local_irq_enable(); - return; - } - - next_state = pr->power.state; - -#ifdef CONFIG_HOTPLUG_CPU - /* Don't do promotion/demotion */ - if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && - !pr->flags.has_cst && !acpi_fadt.plvl2_up) { - next_state = cx; - goto end; - } -#endif - - /* - * Promotion? - * ---------- - * Track the number of longs (time asleep is greater than threshold) - * and promote when the count threshold is reached. Note that bus - * mastering activity may prevent promotions. - * Do not promote above max_cstate. - */ - if (cx->promotion.state && - ((cx->promotion.state - pr->power.states) <= max_cstate)) { - if (sleep_ticks > cx->promotion.threshold.ticks) { - cx->promotion.count++; - cx->demotion.count = 0; - if (cx->promotion.count >= - cx->promotion.threshold.count) { - if (pr->flags.bm_check) { - if (! 
- (pr->power.bm_activity & cx-> - promotion.threshold.bm)) { - next_state = - cx->promotion.state; - goto end; - } - } else { - next_state = cx->promotion.state; - goto end; - } - } - } - } - - /* - * Demotion? - * --------- - * Track the number of shorts (time asleep is less than time threshold) - * and demote when the usage threshold is reached. - */ - if (cx->demotion.state) { - if (sleep_ticks < cx->demotion.threshold.ticks) { - cx->demotion.count++; - cx->promotion.count = 0; - if (cx->demotion.count >= cx->demotion.threshold.count) { - next_state = cx->demotion.state; - goto end; - } - } - } - - end: - /* - * Demote if current state exceeds max_cstate - */ - if ((pr->power.state - pr->power.states) > max_cstate) { - if (cx->demotion.state) - next_state = cx->demotion.state; - } - - /* - * New Cx State? - * ------------- - * If we're going to start using a new Cx state we must clean up - * from the previous and prepare to use the new. - */ - if (next_state != pr->power.state) - acpi_processor_power_activate(pr, next_state); -} - -static int acpi_processor_set_power_policy(struct acpi_processor *pr) -{ - unsigned int i; - unsigned int state_is_set = 0; - struct acpi_processor_cx *lower = NULL; - struct acpi_processor_cx *higher = NULL; - struct acpi_processor_cx *cx; - - ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy"); - - if (!pr) - return_VALUE(-EINVAL); - - /* - * This function sets the default Cx state policy (OS idle handler). - * Our scheme is to promote quickly to C2 but more conservatively - * to C3. We're favoring C2 for its characteristics of low latency - * (quick response), good power savings, and ability to allow bus - * mastering activity. Note that the Cx state policy is completely - * customizable and can be altered dynamically. 
- */ - - /* startup state */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (!state_is_set) - pr->power.state = cx; - state_is_set++; - break; - } - - if (!state_is_set) - return_VALUE(-ENODEV); - - /* demotion */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (lower) { - cx->demotion.state = lower; - cx->demotion.threshold.ticks = cx->latency_ticks; - cx->demotion.threshold.count = 1; - if (cx->type == ACPI_STATE_C3) - cx->demotion.threshold.bm = bm_history; - } - - lower = cx; - } - - /* promotion */ - for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { - cx = &pr->power.states[i]; - if (!cx->valid) - continue; - - if (higher) { - cx->promotion.state = higher; - cx->promotion.threshold.ticks = cx->latency_ticks; - if (cx->type >= ACPI_STATE_C2) - cx->promotion.threshold.count = 4; - else - cx->promotion.threshold.count = 10; - if (higher->type == ACPI_STATE_C3) - cx->promotion.threshold.bm = bm_history; - } - - higher = cx; - } - - return_VALUE(0); -} - -static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) -{ - ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt"); - - if (!pr) - return_VALUE(-EINVAL); - - if (!pr->pblk) - return_VALUE(-ENODEV); - - memset(pr->power.states, 0, sizeof(pr->power.states)); - - /* if info is obtained from pblk/fadt, type equals state */ - pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; - pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; - pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; - - /* the C0 state only exists as a filler in our array, - * and all processors need to support C1 */ - pr->power.states[ACPI_STATE_C0].valid = 1; - pr->power.states[ACPI_STATE_C1].valid = 1; - -#ifndef CONFIG_HOTPLUG_CPU - /* - * Check for P_LVL2_UP flag before entering C2 and above on - * an SMP system. 
- */ - if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up) - return_VALUE(-ENODEV); -#endif - - /* determine C2 and C3 address from pblk */ - pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; - pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; - - /* determine latencies from FADT */ - pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; - pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "lvl2[0x%08x] lvl3[0x%08x]\n", - pr->power.states[ACPI_STATE_C2].address, - pr->power.states[ACPI_STATE_C3].address)); - - return_VALUE(0); -} - -static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr) -{ - ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1"); - - memset(pr->power.states, 0, sizeof(pr->power.states)); - - /* if info is obtained from pblk/fadt, type equals state */ - pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; - pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; - pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; - - /* the C0 state only exists as a filler in our array, - * and all processors need to support C1 */ - pr->power.states[ACPI_STATE_C0].valid = 1; - pr->power.states[ACPI_STATE_C1].valid = 1; - - return_VALUE(0); -} - -static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) -{ - acpi_status status = 0; - acpi_integer count; - int i; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *cst; - - ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst"); - - if (nocst) - return_VALUE(-ENODEV); - - pr->power.count = 0; - for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) - memset(&(pr->power.states[i]), 0, - sizeof(struct acpi_processor_cx)); - - status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); - return_VALUE(-ENODEV); - } - - cst = (union acpi_object *)buffer.pointer; - - /* There must be at least 2 elements */ - if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "not enough elements in _CST\n")); - status = -EFAULT; - goto end; - } - - count = cst->package.elements[0].integer.value; - - /* Validate number of power states. */ - if (count < 1 || count != cst->package.count - 1) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "count given by _CST is not valid\n")); - status = -EFAULT; - goto end; - } - - /* We support up to ACPI_PROCESSOR_MAX_POWER. */ - if (count > ACPI_PROCESSOR_MAX_POWER) { - printk(KERN_WARNING - "Limiting number of power states to max (%d)\n", - ACPI_PROCESSOR_MAX_POWER); - printk(KERN_WARNING - "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); - count = ACPI_PROCESSOR_MAX_POWER; - } - - /* Tell driver that at least _CST is supported. 
*/ - pr->flags.has_cst = 1; - - for (i = 1; i <= count; i++) { - union acpi_object *element; - union acpi_object *obj; - struct acpi_power_register *reg; - struct acpi_processor_cx cx; - - memset(&cx, 0, sizeof(cx)); - - element = (union acpi_object *)&(cst->package.elements[i]); - if (element->type != ACPI_TYPE_PACKAGE) - continue; - - if (element->package.count != 4) - continue; - - obj = (union acpi_object *)&(element->package.elements[0]); - - if (obj->type != ACPI_TYPE_BUFFER) - continue; - - reg = (struct acpi_power_register *)obj->buffer.pointer; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && - (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) - continue; - - cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ? - 0 : reg->address; - - /* There should be an easy way to extract an integer... */ - obj = (union acpi_object *)&(element->package.elements[1]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.type = obj->integer.value; - - if ((cx.type != ACPI_STATE_C1) && - (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) - continue; - - if ((cx.type < ACPI_STATE_C1) || (cx.type > ACPI_STATE_C3)) - continue; - - obj = (union acpi_object *)&(element->package.elements[2]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.latency = obj->integer.value; - - obj = (union acpi_object *)&(element->package.elements[3]); - if (obj->type != ACPI_TYPE_INTEGER) - continue; - - cx.power = obj->integer.value; - - (pr->power.count)++; - memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx)); - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", - pr->power.count)); - - /* Validate number of power states discovered */ - if (pr->power.count < 2) - status = -EFAULT; - - end: - acpi_os_free(buffer.pointer); - - return_VALUE(status); -} - -static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) -{ - ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2"); - - if (!cx->address) - return_VOID; - - /* - * C2 latency must be less than or equal to 100 - * microseconds. - */ - else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "latency too large [%d]\n", cx->latency)); - return_VOID; - } - - /* - * Otherwise we've met all of our C2 requirements. - * Normalize the C2 latency to expedite policy - */ - cx->valid = 1; - cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); - - return_VOID; -} - -static void acpi_processor_power_verify_c3(struct acpi_processor *pr, - struct acpi_processor_cx *cx) -{ - static int bm_check_flag; - - ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3"); - - if (!cx->address) - return_VOID; - - /* - * C3 latency must be less than or equal to 1000 - * microseconds. - */ - else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "latency too large [%d]\n", cx->latency)); - return_VOID; - } - - /* - * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) - * DMA transfers are used by any ISA device to avoid livelock. - * Note that we could disable Type-F DMA (as recommended by - * the erratum), but this is known to disrupt certain ISA - * devices thus we take the conservative approach. 
- */ - else if (errata.piix4.fdma) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C3 not supported on PIIX4 with Type-F DMA\n")); - return_VOID; - } - - /* All the logic here assumes flags.bm_check is same across all CPUs */ - if (!bm_check_flag) { - /* Determine whether bm_check is needed based on CPU */ - acpi_processor_power_init_bm_check(&(pr->flags), pr->id); - bm_check_flag = pr->flags.bm_check; - } else { - pr->flags.bm_check = bm_check_flag; - } - - if (pr->flags.bm_check) { - /* bus mastering control is necessary */ - if (!pr->flags.bm_control) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "C3 support requires bus mastering control\n")); - return_VOID; - } - } else { - /* - * WBINVD should be set in the FADT for the C3 state to be - * supported when bm_check is not required. - */ - if (acpi_fadt.wb_invd != 1) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Cache invalidation should work properly" - " for C3 to be enabled on SMP systems\n")); - return_VOID; - } - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, - 0, ACPI_MTX_DO_NOT_LOCK); - } - - /* - * Otherwise we've met all of our C3 requirements. - * Normalize the C3 latency to expedite policy. Enable - * checking of bus mastering status (bm_check) so we can - * use this in our C3 policy - */ - cx->valid = 1; - cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); - - return_VOID; -} - -static int acpi_processor_power_verify(struct acpi_processor *pr) -{ - unsigned int i; - unsigned int working = 0; - - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - struct acpi_processor_cx *cx = &pr->power.states[i]; - - switch (cx->type) { - case ACPI_STATE_C1: - cx->valid = 1; - break; - - case ACPI_STATE_C2: - acpi_processor_power_verify_c2(cx); - break; - - case ACPI_STATE_C3: - acpi_processor_power_verify_c3(pr, cx); - break; - } - - if (cx->valid) - working++; - } - - return (working); -} - -static int acpi_processor_get_power_info(struct acpi_processor *pr) -{ - unsigned int i; - int result; - - ACPI_FUNCTION_TRACE("acpi_processor_get_power_info"); - - /* NOTE: the idle thread may not be running while calling - * this function */ - - result = acpi_processor_get_power_info_cst(pr); - if (result == -ENODEV) - result = acpi_processor_get_power_info_fadt(pr); - - if ((result) || (acpi_processor_power_verify(pr) < 2)) - result = acpi_processor_get_power_info_default_c1(pr); - - /* - * Set Default Policy - * ------------------ - * Now that we know which states are supported, set the default - * policy. Note that this policy can be changed dynamically - * (e.g. encourage deeper sleeps to conserve battery life when - * not on AC). - */ - result = acpi_processor_set_power_policy(pr); - if (result) - return_VALUE(result); - - /* - * if one state of type C2 or C3 is available, mark this - * CPU as being "idle manageable" - */ - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { - if (pr->power.states[i].valid) { - pr->power.count = i; - if (pr->power.states[i].type >= ACPI_STATE_C2) - pr->flags.power = 1; - } - } - - return_VALUE(0); -} - -int acpi_processor_cst_has_changed(struct acpi_processor *pr) -{ - int result = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed"); - - if (!pr) - return_VALUE(-EINVAL); - - if (nocst) { - return_VALUE(-ENODEV); - } - - if (!pr->flags.power_setup_done) - return_VALUE(-ENODEV); - - /* Fall back to the default idle loop */ - pm_idle = pm_idle_save; - synchronize_sched(); /* Relies on interrupts forcing exit from idle. 
*/ - - pr->flags.power = 0; - result = acpi_processor_get_power_info(pr); - if ((pr->flags.power == 1) && (pr->flags.power_setup_done)) - pm_idle = acpi_processor_idle; - - return_VALUE(result); -} - -/* proc interface */ - -static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) -{ - struct acpi_processor *pr = (struct acpi_processor *)seq->private; - unsigned int i; - - ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show"); - - if (!pr) - goto end; - - seq_printf(seq, "active state: C%zd\n" - "max_cstate: C%d\n" - "bus master activity: %08x\n", - pr->power.state ? pr->power.state - pr->power.states : 0, - max_cstate, (unsigned)pr->power.bm_activity); - - seq_puts(seq, "states:\n"); - - for (i = 1; i <= pr->power.count; i++) { - seq_printf(seq, " %cC%d: ", - (&pr->power.states[i] == - pr->power.state ? '*' : ' '), i); - - if (!pr->power.states[i].valid) { - seq_puts(seq, "\n"); - continue; - } - - switch (pr->power.states[i].type) { - case ACPI_STATE_C1: - seq_printf(seq, "type[C1] "); - break; - case ACPI_STATE_C2: - seq_printf(seq, "type[C2] "); - break; - case ACPI_STATE_C3: - seq_printf(seq, "type[C3] "); - break; - default: - seq_printf(seq, "type[--] "); - break; - } - - if (pr->power.states[i].promotion.state) - seq_printf(seq, "promotion[C%zd] ", - (pr->power.states[i].promotion.state - - pr->power.states)); - else - seq_puts(seq, "promotion[--] "); - - if (pr->power.states[i].demotion.state) - seq_printf(seq, "demotion[C%zd] ", - (pr->power.states[i].demotion.state - - pr->power.states)); - else - seq_puts(seq, "demotion[--] "); - - seq_printf(seq, "latency[%03d] usage[%08d]\n", - pr->power.states[i].latency, - pr->power.states[i].usage); - } - - end: - return_VALUE(0); -} - -static int acpi_processor_power_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_processor_power_seq_show, - PDE(inode)->data); -} - -static struct file_operations acpi_processor_power_fops = { - .open = acpi_processor_power_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -int acpi_processor_power_init(struct acpi_processor *pr, - struct acpi_device *device) -{ - acpi_status status = 0; - static int first_run = 0; - struct proc_dir_entry *entry = NULL; - unsigned int i; - - ACPI_FUNCTION_TRACE("acpi_processor_power_init"); - - if (!first_run) { - dmi_check_system(processor_power_dmi_table); - if (max_cstate < ACPI_C_STATES_MAX) - printk(KERN_NOTICE - "ACPI: processor limited to max C-state %d\n", - max_cstate); - first_run++; - } - - if (!pr) - return_VALUE(-EINVAL); - - if (acpi_fadt.cst_cnt && !nocst) { - status = - acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Notifying BIOS of _CST ability failed\n")); - } - } - - acpi_processor_power_init_pdc(&(pr->power), pr->id); - acpi_processor_set_pdc(pr, pr->power.pdc); - acpi_processor_get_power_info(pr); - - /* - * Install the idle handler if processor power management is supported. - * Note that the previously set idle handler will be used on - * platforms that only support C1. 
- */ - if ((pr->flags.power) && (!boot_option_idle_override)) { - printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id); - for (i = 1; i <= pr->power.count; i++) - if (pr->power.states[i].valid) - printk(" C%d[C%d]", i, - pr->power.states[i].type); - printk(")\n"); - - if (pr->id == 0) { - pm_idle_save = pm_idle; - pm_idle = acpi_processor_idle; - } - } - - /* 'power' [R] */ - entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER, - S_IRUGO, acpi_device_dir(device)); - if (!entry) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Unable to create '%s' fs entry\n", - ACPI_PROCESSOR_FILE_POWER)); - else { - entry->proc_fops = &acpi_processor_power_fops; - entry->data = acpi_driver_data(device); - entry->owner = THIS_MODULE; - } - - pr->flags.power_setup_done = 1; - - return_VALUE(0); -} - -int acpi_processor_power_exit(struct acpi_processor *pr, - struct acpi_device *device) -{ - ACPI_FUNCTION_TRACE("acpi_processor_power_exit"); - - pr->flags.power_setup_done = 0; - - if (acpi_device_dir(device)) - remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, - acpi_device_dir(device)); - - /* Unregister the idle handler when processor #0 is removed. */ - if (pr->id == 0) { - pm_idle = pm_idle_save; - - /* - * We are about to unload the current idle thread pm callback - * (pm_idle), Wait for all processors to update cached/local - * copies of pm_idle before proceeding. - */ - cpu_idle_wait(); - } - - return_VALUE(0); -} diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c deleted file mode 100644 index 22c7bb6..0000000 --- a/drivers/acpi/processor_perflib.c +++ /dev/null @@ -1,624 +0,0 @@ -/* - * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $) - * - * Copyright (C) 2001, 2002 Andy Grover - * Copyright (C) 2001, 2002 Paul Diefenbaugh - * Copyright (C) 2004 Dominik Brodowski - * Copyright (C) 2004 Anil S Keshavamurthy - * - Added processor hotplug support - * - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/cpufreq.h> - -#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF -#include <linux/proc_fs.h> -#include <linux/seq_file.h> - -#include <asm/uaccess.h> -#endif - -#include <acpi/acpi_bus.h> -#include <acpi/processor.h> - -#define ACPI_PROCESSOR_COMPONENT 0x01000000 -#define ACPI_PROCESSOR_CLASS "processor" -#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" -#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance" -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("acpi_processor") - -static DECLARE_MUTEX(performance_sem); - -/* - * _PPC support is implemented as a CPUfreq policy notifier: - * This means each time a CPUfreq driver registered also with - * the ACPI core is asked to change the speed policy, the maximum - * value is adjusted so that it is within the platform limit. 
- * - * Also, when a new platform limit value is detected, the CPUfreq - * policy is adjusted accordingly. - */ - -#define PPC_REGISTERED 1 -#define PPC_IN_USE 2 - -static int acpi_processor_ppc_status = 0; - -static int acpi_processor_ppc_notifier(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct cpufreq_policy *policy = data; - struct acpi_processor *pr; - unsigned int ppc = 0; - - down(&performance_sem); - - if (event != CPUFREQ_INCOMPATIBLE) - goto out; - - pr = processors[policy->cpu]; - if (!pr || !pr->performance) - goto out; - - ppc = (unsigned int)pr->performance_platform_limit; - if (!ppc) - goto out; - - if (ppc > pr->performance->state_count) - goto out; - - cpufreq_verify_within_limits(policy, 0, - pr->performance->states[ppc]. - core_frequency * 1000); - - out: - up(&performance_sem); - - return 0; -} - -static struct notifier_block acpi_ppc_notifier_block = { - .notifier_call = acpi_processor_ppc_notifier, -}; - -static int acpi_processor_get_platform_limit(struct acpi_processor *pr) -{ - acpi_status status = 0; - unsigned long ppc = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit"); - - if (!pr) - return_VALUE(-EINVAL); - - /* - * _PPC indicates the maximum state currently supported by the platform - * (e.g. 0 = states 0..n; 1 = states 1..n; etc. - */ - status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc); - - if (status != AE_NOT_FOUND) - acpi_processor_ppc_status |= PPC_IN_USE; - - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PPC\n")); - return_VALUE(-ENODEV); - } - - pr->performance_platform_limit = (int)ppc; - - return_VALUE(0); -} - -int acpi_processor_ppc_has_changed(struct acpi_processor *pr) -{ - int ret = acpi_processor_get_platform_limit(pr); - if (ret < 0) - return (ret); - else - return cpufreq_update_policy(pr->id); -} - -void acpi_processor_ppc_init(void) -{ - if (!cpufreq_register_notifier - (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER)) - acpi_processor_ppc_status |= PPC_REGISTERED; - else - printk(KERN_DEBUG - "Warning: Processor Platform Limit not supported.\n"); -} - -void acpi_processor_ppc_exit(void) -{ - if (acpi_processor_ppc_status & PPC_REGISTERED) - cpufreq_unregister_notifier(&acpi_ppc_notifier_block, - CPUFREQ_POLICY_NOTIFIER); - - acpi_processor_ppc_status &= ~PPC_REGISTERED; -} - -static int acpi_processor_get_performance_control(struct acpi_processor *pr) -{ - int result = 0; - acpi_status status = 0; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *pct = NULL; - union acpi_object obj = { 0 }; - - ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control"); - - status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PCT\n")); - return_VALUE(-ENODEV); - } - - pct = (union acpi_object *)buffer.pointer; - if (!pct || (pct->type != ACPI_TYPE_PACKAGE) - || (pct->package.count != 2)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data\n")); - result = -EFAULT; - goto end; - } - - /* - * control_register - */ - - obj = pct->package.elements[0]; - - if ((obj.type != ACPI_TYPE_BUFFER) - || (obj.buffer.length < sizeof(struct acpi_pct_register)) - || (obj.buffer.pointer == NULL)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Invalid _PCT data (control_register)\n")); - result = -EFAULT; - goto end; - } - memcpy(&pr->performance->control_register, obj.buffer.pointer, - sizeof(struct acpi_pct_register)); - - /* - * 
status_register - */ - - obj = pct->package.elements[1]; - - if ((obj.type != ACPI_TYPE_BUFFER) - || (obj.buffer.length < sizeof(struct acpi_pct_register)) - || (obj.buffer.pointer == NULL)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Invalid _PCT data (status_register)\n")); - result = -EFAULT; - goto end; - } - - memcpy(&pr->performance->status_register, obj.buffer.pointer, - sizeof(struct acpi_pct_register)); - - end: - acpi_os_free(buffer.pointer); - - return_VALUE(result); -} - -static int acpi_processor_get_performance_states(struct acpi_processor *pr) -{ - int result = 0; - acpi_status status = AE_OK; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" }; - struct acpi_buffer state = { 0, NULL }; - union acpi_object *pss = NULL; - int i; - - ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states"); - - status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PSS\n")); - return_VALUE(-ENODEV); - } - - pss = (union acpi_object *)buffer.pointer; - if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n")); - result = -EFAULT; - goto end; - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n", - pss->package.count)); - - pr->performance->state_count = pss->package.count; - pr->performance->states = - kmalloc(sizeof(struct acpi_processor_px) * pss->package.count, - GFP_KERNEL); - if (!pr->performance->states) { - result = -ENOMEM; - goto end; - } - - for (i = 0; i < pr->performance->state_count; i++) { - - struct acpi_processor_px *px = &(pr->performance->states[i]); - - state.length = sizeof(struct acpi_processor_px); - state.pointer = px; - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i)); - - status = acpi_extract_package(&(pss->package.elements[i]), - &format, &state); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Invalid _PSS data\n")); - result = -EFAULT; - kfree(pr->performance->states); - goto end; - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n", - i, - (u32) px->core_frequency, - (u32) px->power, - (u32) px->transition_latency, - (u32) px->bus_master_latency, - (u32) px->control, (u32) px->status)); - - if (!px->core_frequency) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Invalid _PSS data: freq is zero\n")); - result = -EFAULT; - kfree(pr->performance->states); - goto end; - } - } - - end: - acpi_os_free(buffer.pointer); - - return_VALUE(result); -} - -static int acpi_processor_get_performance_info(struct acpi_processor *pr) -{ - int result = 0; - acpi_status status = AE_OK; - acpi_handle handle = NULL; - - ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info"); - - if (!pr || !pr->performance || !pr->handle) - return_VALUE(-EINVAL); - - acpi_processor_set_pdc(pr, pr->performance->pdc); - - status = acpi_get_handle(pr->handle, "_PCT", &handle); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "ACPI-based processor performance control unavailable\n")); - return_VALUE(-ENODEV); - } - - result = acpi_processor_get_performance_control(pr); - if (result) - return_VALUE(result); - - result = acpi_processor_get_performance_states(pr); - if (result) - return_VALUE(result); - - result = acpi_processor_get_platform_limit(pr); - if (result) - return_VALUE(result); - - return_VALUE(0); -} - -int 
acpi_processor_notify_smm(struct module *calling_module) -{ - acpi_status status; - static int is_done = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_notify_smm"); - - if (!(acpi_processor_ppc_status & PPC_REGISTERED)) - return_VALUE(-EBUSY); - - if (!try_module_get(calling_module)) - return_VALUE(-EINVAL); - - /* is_done is set to negative if an error occurred, - * and to positive if _no_ error occurred, but SMM - * was already notified. This avoids double notification - * which might lead to unexpected results... - */ - if (is_done > 0) { - module_put(calling_module); - return_VALUE(0); - } else if (is_done < 0) { - module_put(calling_module); - return_VALUE(is_done); - } - - is_done = -EIO; - - /* Can't write pstate_cnt to smi_cmd if either value is zero */ - if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n")); - module_put(calling_module); - return_VALUE(0); - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n", - acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd)); - - /* FADT v1 doesn't support pstate_cnt, many BIOS vendors use - * it anyway, so we need to support it... */ - if (acpi_fadt_is_v1) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Using v1.0 FADT reserved value for pstate_cnt\n")); - } - - status = acpi_os_write_port(acpi_fadt.smi_cmd, - (u32) acpi_fadt.pstate_cnt, 8); - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Failed to write pstate_cnt [0x%x] to " - "smi_cmd [0x%x]\n", acpi_fadt.pstate_cnt, - acpi_fadt.smi_cmd)); - module_put(calling_module); - return_VALUE(status); - } - - /* Success. If there's no _PPC, we need to fear nothing, so - * we can allow the cpufreq driver to be rmmod'ed. */ - is_done = 1; - - if (!(acpi_processor_ppc_status & PPC_IN_USE)) - module_put(calling_module); - - return_VALUE(0); -} - -EXPORT_SYMBOL(acpi_processor_notify_smm); - -#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF -/* /proc/acpi/processor/../performance interface (DEPRECATED) */ - -static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file); -static struct file_operations acpi_processor_perf_fops = { - .open = acpi_processor_perf_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset) -{ - struct acpi_processor *pr = (struct acpi_processor *)seq->private; - int i; - - ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show"); - - if (!pr) - goto end; - - if (!pr->performance) { - seq_puts(seq, "\n"); - goto end; - } - - seq_printf(seq, "state count: %d\n" - "active state: P%d\n", - pr->performance->state_count, pr->performance->state); - - seq_puts(seq, "states:\n"); - for (i = 0; i < pr->performance->state_count; i++) - seq_printf(seq, - " %cP%d: %d MHz, %d mW, %d uS\n", - (i == pr->performance->state ? 
'*' : ' '), i, - (u32) pr->performance->states[i].core_frequency, - (u32) pr->performance->states[i].power, - (u32) pr->performance->states[i].transition_latency); - - end: - return_VALUE(0); -} - -static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_processor_perf_seq_show, - PDE(inode)->data); -} - -static ssize_t -acpi_processor_write_performance(struct file *file, - const char __user * buffer, - size_t count, loff_t * data) -{ - int result = 0; - struct seq_file *m = (struct seq_file *)file->private_data; - struct acpi_processor *pr = (struct acpi_processor *)m->private; - struct acpi_processor_performance *perf; - char state_string[12] = { '\0' }; - unsigned int new_state = 0; - struct cpufreq_policy policy; - - ACPI_FUNCTION_TRACE("acpi_processor_write_performance"); - - if (!pr || (count > sizeof(state_string) - 1)) - return_VALUE(-EINVAL); - - perf = pr->performance; - if (!perf) - return_VALUE(-EINVAL); - - if (copy_from_user(state_string, buffer, count)) - return_VALUE(-EFAULT); - - state_string[count] = '\0'; - new_state = simple_strtoul(state_string, NULL, 0); - - if (new_state >= perf->state_count) - return_VALUE(-EINVAL); - - cpufreq_get_policy(&policy, pr->id); - - policy.cpu = pr->id; - policy.min = perf->states[new_state].core_frequency * 1000; - policy.max = perf->states[new_state].core_frequency * 1000; - - result = cpufreq_set_policy(&policy); - if (result) - return_VALUE(result); - - return_VALUE(count); -} - -static void acpi_cpufreq_add_file(struct acpi_processor *pr) -{ - struct proc_dir_entry *entry = NULL; - struct acpi_device *device = NULL; - - ACPI_FUNCTION_TRACE("acpi_cpufreq_addfile"); - - if (acpi_bus_get_device(pr->handle, &device)) - return_VOID; - - /* add file 'performance' [R/W] */ - entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE, - S_IFREG | S_IRUGO | S_IWUSR, - acpi_device_dir(device)); - if (!entry) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Unable to create '%s' fs entry\n", - ACPI_PROCESSOR_FILE_PERFORMANCE)); - else { - entry->proc_fops = &acpi_processor_perf_fops; - entry->proc_fops->write = acpi_processor_write_performance; - entry->data = acpi_driver_data(device); - entry->owner = THIS_MODULE; - } - return_VOID; -} - -static void acpi_cpufreq_remove_file(struct acpi_processor *pr) -{ - struct acpi_device *device = NULL; - - ACPI_FUNCTION_TRACE("acpi_cpufreq_addfile"); - - if (acpi_bus_get_device(pr->handle, &device)) - return_VOID; - - /* remove file 'performance' */ - remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE, - acpi_device_dir(device)); - - return_VOID; -} - -#else -static void acpi_cpufreq_add_file(struct acpi_processor *pr) -{ - return; -} -static void acpi_cpufreq_remove_file(struct acpi_processor *pr) -{ - return; -} -#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */ - -int -acpi_processor_register_performance(struct acpi_processor_performance - *performance, unsigned int cpu) -{ - struct acpi_processor *pr; - - ACPI_FUNCTION_TRACE("acpi_processor_register_performance"); - - if (!(acpi_processor_ppc_status & PPC_REGISTERED)) - return_VALUE(-EINVAL); - - down(&performance_sem); - - pr = processors[cpu]; - if (!pr) { - up(&performance_sem); - return_VALUE(-ENODEV); - } - - if (pr->performance) { - up(&performance_sem); - return_VALUE(-EBUSY); - } - - pr->performance = performance; - - if (acpi_processor_get_performance_info(pr)) { - pr->performance = NULL; - up(&performance_sem); - return_VALUE(-EIO); - } - - acpi_cpufreq_add_file(pr); - - up(&performance_sem); - 
return_VALUE(0); -} - -EXPORT_SYMBOL(acpi_processor_register_performance); - -void -acpi_processor_unregister_performance(struct acpi_processor_performance - *performance, unsigned int cpu) -{ - struct acpi_processor *pr; - - ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance"); - - down(&performance_sem); - - pr = processors[cpu]; - if (!pr) { - up(&performance_sem); - return_VOID; - } - - kfree(pr->performance->states); - pr->performance = NULL; - - acpi_cpufreq_remove_file(pr); - - up(&performance_sem); - - return_VOID; -} - -EXPORT_SYMBOL(acpi_processor_unregister_performance); diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c deleted file mode 100644 index dc9817c..0000000 --- a/drivers/acpi/processor_thermal.c +++ /dev/null @@ -1,399 +0,0 @@ -/* - * processor_thermal.c - Passive cooling submodule of the ACPI processor driver - * - * Copyright (C) 2001, 2002 Andy Grover - * Copyright (C) 2001, 2002 Paul Diefenbaugh - * Copyright (C) 2004 Dominik Brodowski - * Copyright (C) 2004 Anil S Keshavamurthy - * - Added processor hotplug support - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> - -#include <asm/uaccess.h> - -#include <acpi/acpi_bus.h> -#include <acpi/processor.h> -#include <acpi/acpi_drivers.h> - -#define ACPI_PROCESSOR_COMPONENT 0x01000000 -#define ACPI_PROCESSOR_CLASS "processor" -#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("acpi_processor") - -/* -------------------------------------------------------------------------- - Limit Interface - -------------------------------------------------------------------------- */ -static int acpi_processor_apply_limit(struct acpi_processor *pr) -{ - int result = 0; - u16 px = 0; - u16 tx = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_apply_limit"); - - if (!pr) - return_VALUE(-EINVAL); - - if (!pr->flags.limit) - return_VALUE(-ENODEV); - - if (pr->flags.throttling) { - if (pr->limit.user.tx > tx) - tx = pr->limit.user.tx; - if (pr->limit.thermal.tx > tx) - tx = pr->limit.thermal.tx; - - result = acpi_processor_set_throttling(pr, tx); - if (result) - goto end; - } - - pr->limit.state.px = px; - pr->limit.state.tx = tx; - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Processor [%d] limit set to (P%d:T%d)\n", pr->id, - pr->limit.state.px, pr->limit.state.tx)); - - end: - if (result) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to set limit\n")); - - return_VALUE(result); -} - -#ifdef CONFIG_CPU_FREQ - -/* If a passive cooling situation is detected, primarily CPUfreq is used, as it - * offers (in most cases) voltage scaling in addition to frequency scaling, and - * thus a cubic (instead of linear) reduction of energy. 
Also, we allow for - * _any_ cpufreq driver and not only the acpi-cpufreq driver. - */ - -static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS]; -static unsigned int acpi_thermal_cpufreq_is_init = 0; - -static int cpu_has_cpufreq(unsigned int cpu) -{ - struct cpufreq_policy policy; - if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu)) - return 0; - return 1; -} - -static int acpi_thermal_cpufreq_increase(unsigned int cpu) -{ - if (!cpu_has_cpufreq(cpu)) - return -ENODEV; - - if (cpufreq_thermal_reduction_pctg[cpu] < 60) { - cpufreq_thermal_reduction_pctg[cpu] += 20; - cpufreq_update_policy(cpu); - return 0; - } - - return -ERANGE; -} - -static int acpi_thermal_cpufreq_decrease(unsigned int cpu) -{ - if (!cpu_has_cpufreq(cpu)) - return -ENODEV; - - if (cpufreq_thermal_reduction_pctg[cpu] > 20) - cpufreq_thermal_reduction_pctg[cpu] -= 20; - else - cpufreq_thermal_reduction_pctg[cpu] = 0; - cpufreq_update_policy(cpu); - /* We reached max freq again and can leave passive mode */ - return !cpufreq_thermal_reduction_pctg[cpu]; -} - -static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct cpufreq_policy *policy = data; - unsigned long max_freq = 0; - - if (event != CPUFREQ_ADJUST) - goto out; - - max_freq = - (policy->cpuinfo.max_freq * - (100 - cpufreq_thermal_reduction_pctg[policy->cpu])) / 100; - - cpufreq_verify_within_limits(policy, 0, max_freq); - - out: - return 0; -} - -static struct notifier_block acpi_thermal_cpufreq_notifier_block = { - .notifier_call = acpi_thermal_cpufreq_notifier, -}; - -void acpi_thermal_cpufreq_init(void) -{ - int i; - - for (i = 0; i < NR_CPUS; i++) - cpufreq_thermal_reduction_pctg[i] = 0; - - i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, - CPUFREQ_POLICY_NOTIFIER); - if (!i) - acpi_thermal_cpufreq_is_init = 1; -} - -void acpi_thermal_cpufreq_exit(void) -{ - if (acpi_thermal_cpufreq_is_init) - cpufreq_unregister_notifier - (&acpi_thermal_cpufreq_notifier_block, - CPUFREQ_POLICY_NOTIFIER); - - acpi_thermal_cpufreq_is_init = 0; -} - -#else /* ! CONFIG_CPU_FREQ */ - -static int acpi_thermal_cpufreq_increase(unsigned int cpu) -{ - return -ENODEV; -} -static int acpi_thermal_cpufreq_decrease(unsigned int cpu) -{ - return -ENODEV; -} - -#endif - -int acpi_processor_set_thermal_limit(acpi_handle handle, int type) -{ - int result = 0; - struct acpi_processor *pr = NULL; - struct acpi_device *device = NULL; - int tx = 0, max_tx_px = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_set_thermal_limit"); - - if ((type < ACPI_PROCESSOR_LIMIT_NONE) - || (type > ACPI_PROCESSOR_LIMIT_DECREMENT)) - return_VALUE(-EINVAL); - - result = acpi_bus_get_device(handle, &device); - if (result) - return_VALUE(result); - - pr = (struct acpi_processor *)acpi_driver_data(device); - if (!pr) - return_VALUE(-ENODEV); - - /* Thermal limits are always relative to the current Px/Tx state. */ - if (pr->flags.throttling) - pr->limit.thermal.tx = pr->throttling.state; - - /* - * Our default policy is to only use throttling at the lowest - * performance state. 
- */ - - tx = pr->limit.thermal.tx; - - switch (type) { - - case ACPI_PROCESSOR_LIMIT_NONE: - do { - result = acpi_thermal_cpufreq_decrease(pr->id); - } while (!result); - tx = 0; - break; - - case ACPI_PROCESSOR_LIMIT_INCREMENT: - /* if going up: P-states first, T-states later */ - - result = acpi_thermal_cpufreq_increase(pr->id); - if (!result) - goto end; - else if (result == -ERANGE) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "At maximum performance state\n")); - - if (pr->flags.throttling) { - if (tx == (pr->throttling.state_count - 1)) - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "At maximum throttling state\n")); - else - tx++; - } - break; - - case ACPI_PROCESSOR_LIMIT_DECREMENT: - /* if going down: T-states first, P-states later */ - - if (pr->flags.throttling) { - if (tx == 0) { - max_tx_px = 1; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "At minimum throttling state\n")); - } else { - tx--; - goto end; - } - } - - result = acpi_thermal_cpufreq_decrease(pr->id); - if (result) { - /* - * We only could get -ERANGE, 1 or 0. - * In the first two cases we reached max freq again. - */ - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "At minimum performance state\n")); - max_tx_px = 1; - } else - max_tx_px = 0; - - break; - } - - end: - if (pr->flags.throttling) { - pr->limit.thermal.px = 0; - pr->limit.thermal.tx = tx; - - result = acpi_processor_apply_limit(pr); - if (result) - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Unable to set thermal limit\n")); - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n", - pr->limit.thermal.px, pr->limit.thermal.tx)); - } else - result = 0; - if (max_tx_px) - return_VALUE(1); - else - return_VALUE(result); -} - -int acpi_processor_get_limit_info(struct acpi_processor *pr) -{ - ACPI_FUNCTION_TRACE("acpi_processor_get_limit_info"); - - if (!pr) - return_VALUE(-EINVAL); - - if (pr->flags.throttling) - pr->flags.limit = 1; - - return_VALUE(0); -} - -/* /proc interface */ - -static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset) -{ - struct acpi_processor *pr = (struct acpi_processor *)seq->private; - - ACPI_FUNCTION_TRACE("acpi_processor_limit_seq_show"); - - if (!pr) - goto end; - - if (!pr->flags.limit) { - seq_puts(seq, "\n"); - goto end; - } - - seq_printf(seq, "active limit: P%d:T%d\n" - "user limit: P%d:T%d\n" - "thermal limit: P%d:T%d\n", - pr->limit.state.px, pr->limit.state.tx, - pr->limit.user.px, pr->limit.user.tx, - pr->limit.thermal.px, pr->limit.thermal.tx); - - end: - return_VALUE(0); -} - -static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_processor_limit_seq_show, - PDE(inode)->data); -} - -ssize_t acpi_processor_write_limit(struct file * file, - const char __user * buffer, - size_t count, loff_t * data) -{ - int result = 0; - struct seq_file *m = (struct seq_file *)file->private_data; - struct acpi_processor *pr = (struct acpi_processor *)m->private; - char limit_string[25] = { '\0' }; - int px = 0; - int tx = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_write_limit"); - - if (!pr || (count > sizeof(limit_string) - 1)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid argument\n")); - return_VALUE(-EINVAL); - } - - if (copy_from_user(limit_string, buffer, count)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data\n")); - return_VALUE(-EFAULT); - } - - limit_string[count] = '\0'; - - if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid data format\n")); - return_VALUE(-EINVAL); - } - - if (pr->flags.throttling) { - if ((tx < 0) || (tx > 
(pr->throttling.state_count - 1))) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid tx\n")); - return_VALUE(-EINVAL); - } - pr->limit.user.tx = tx; - } - - result = acpi_processor_apply_limit(pr); - - return_VALUE(count); -} - -struct file_operations acpi_processor_limit_fops = { - .open = acpi_processor_limit_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c deleted file mode 100644 index 74a52d4..0000000 --- a/drivers/acpi/processor_throttling.c +++ /dev/null @@ -1,342 +0,0 @@ -/* - * processor_throttling.c - Throttling submodule of the ACPI processor driver - * - * Copyright (C) 2001, 2002 Andy Grover - * Copyright (C) 2001, 2002 Paul Diefenbaugh - * Copyright (C) 2004 Dominik Brodowski - * Copyright (C) 2004 Anil S Keshavamurthy - * - Added processor hotplug support - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> - -#include <asm/io.h> -#include <asm/uaccess.h> - -#include <acpi/acpi_bus.h> -#include <acpi/processor.h> - -#define ACPI_PROCESSOR_COMPONENT 0x01000000 -#define ACPI_PROCESSOR_CLASS "processor" -#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" -#define _COMPONENT ACPI_PROCESSOR_COMPONENT -ACPI_MODULE_NAME("acpi_processor") - -/* -------------------------------------------------------------------------- - Throttling Control - -------------------------------------------------------------------------- */ -static int acpi_processor_get_throttling(struct acpi_processor *pr) -{ - int state = 0; - u32 value = 0; - u32 duty_mask = 0; - u32 duty_value = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_get_throttling"); - - if (!pr) - return_VALUE(-EINVAL); - - if (!pr->flags.throttling) - return_VALUE(-ENODEV); - - pr->throttling.state = 0; - - duty_mask = pr->throttling.state_count - 1; - - duty_mask <<= pr->throttling.duty_offset; - - local_irq_disable(); - - value = inl(pr->throttling.address); - - /* - * Compute the current throttling state when throttling is enabled - * (bit 4 is on). 
- */ - if (value & 0x10) { - duty_value = value & duty_mask; - duty_value >>= pr->throttling.duty_offset; - - if (duty_value) - state = pr->throttling.state_count - duty_value; - } - - pr->throttling.state = state; - - local_irq_enable(); - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Throttling state is T%d (%d%% throttling applied)\n", - state, pr->throttling.states[state].performance)); - - return_VALUE(0); -} - -int acpi_processor_set_throttling(struct acpi_processor *pr, int state) -{ - u32 value = 0; - u32 duty_mask = 0; - u32 duty_value = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_set_throttling"); - - if (!pr) - return_VALUE(-EINVAL); - - if ((state < 0) || (state > (pr->throttling.state_count - 1))) - return_VALUE(-EINVAL); - - if (!pr->flags.throttling) - return_VALUE(-ENODEV); - - if (state == pr->throttling.state) - return_VALUE(0); - - /* - * Calculate the duty_value and duty_mask. - */ - if (state) { - duty_value = pr->throttling.state_count - state; - - duty_value <<= pr->throttling.duty_offset; - - /* Used to clear all duty_value bits */ - duty_mask = pr->throttling.state_count - 1; - - duty_mask <<= acpi_fadt.duty_offset; - duty_mask = ~duty_mask; - } - - local_irq_disable(); - - /* - * Disable throttling by writing a 0 to bit 4. Note that we must - * turn it off before you can change the duty_value. - */ - value = inl(pr->throttling.address); - if (value & 0x10) { - value &= 0xFFFFFFEF; - outl(value, pr->throttling.address); - } - - /* - * Write the new duty_value and then enable throttling. Note - * that a state value of 0 leaves throttling disabled. - */ - if (state) { - value &= duty_mask; - value |= duty_value; - outl(value, pr->throttling.address); - - value |= 0x00000010; - outl(value, pr->throttling.address); - } - - pr->throttling.state = state; - - local_irq_enable(); - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Throttling state set to T%d (%d%%)\n", state, - (pr->throttling.states[state].performance ? pr-> - throttling.states[state].performance / 10 : 0))); - - return_VALUE(0); -} - -int acpi_processor_get_throttling_info(struct acpi_processor *pr) -{ - int result = 0; - int step = 0; - int i = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_get_throttling_info"); - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n", - pr->throttling.address, - pr->throttling.duty_offset, - pr->throttling.duty_width)); - - if (!pr) - return_VALUE(-EINVAL); - - /* TBD: Support ACPI 2.0 objects */ - - if (!pr->throttling.address) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n")); - return_VALUE(0); - } else if (!pr->throttling.duty_width) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n")); - return_VALUE(0); - } - /* TBD: Support duty_cycle values that span bit 4. */ - else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) { - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "duty_cycle spans bit 4\n")); - return_VALUE(0); - } - - /* - * PIIX4 Errata: We don't support throttling on the original PIIX4. - * This shouldn't be an issue as few (if any) mobile systems ever - * used this part. - */ - if (errata.piix4.throttle) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Throttling not supported on PIIX4 A- or B-step\n")); - return_VALUE(0); - } - - pr->throttling.state_count = 1 << acpi_fadt.duty_width; - - /* - * Compute state values. Note that throttling displays a linear power/ - * performance relationship (at 50% performance the CPU will consume - * 50% power). Values are in 1/10th of a percent to preserve accuracy. 
- */ - - step = (1000 / pr->throttling.state_count); - - for (i = 0; i < pr->throttling.state_count; i++) { - pr->throttling.states[i].performance = step * i; - pr->throttling.states[i].power = step * i; - } - - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n", - pr->throttling.state_count)); - - pr->flags.throttling = 1; - - /* - * Disable throttling (if enabled). We'll let subsequent policy (e.g. - * thermal) decide to lower performance if it so chooses, but for now - * we'll crank up the speed. - */ - - result = acpi_processor_get_throttling(pr); - if (result) - goto end; - - if (pr->throttling.state) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Disabling throttling (was T%d)\n", - pr->throttling.state)); - result = acpi_processor_set_throttling(pr, 0); - if (result) - goto end; - } - - end: - if (result) - pr->flags.throttling = 0; - - return_VALUE(result); -} - -/* proc interface */ - -static int acpi_processor_throttling_seq_show(struct seq_file *seq, - void *offset) -{ - struct acpi_processor *pr = (struct acpi_processor *)seq->private; - int i = 0; - int result = 0; - - ACPI_FUNCTION_TRACE("acpi_processor_throttling_seq_show"); - - if (!pr) - goto end; - - if (!(pr->throttling.state_count > 0)) { - seq_puts(seq, "\n"); - goto end; - } - - result = acpi_processor_get_throttling(pr); - - if (result) { - seq_puts(seq, - "Could not determine current throttling state.\n"); - goto end; - } - - seq_printf(seq, "state count: %d\n" - "active state: T%d\n", - pr->throttling.state_count, pr->throttling.state); - - seq_puts(seq, "states:\n"); - for (i = 0; i < pr->throttling.state_count; i++) - seq_printf(seq, " %cT%d: %02d%%\n", - (i == pr->throttling.state ? '*' : ' '), i, - (pr->throttling.states[i].performance ? pr-> - throttling.states[i].performance / 10 : 0)); - - end: - return_VALUE(0); -} - -static int acpi_processor_throttling_open_fs(struct inode *inode, - struct file *file) -{ - return single_open(file, acpi_processor_throttling_seq_show, - PDE(inode)->data); -} - -ssize_t acpi_processor_write_throttling(struct file * file, - const char __user * buffer, - size_t count, loff_t * data) -{ - int result = 0; - struct seq_file *m = (struct seq_file *)file->private_data; - struct acpi_processor *pr = (struct acpi_processor *)m->private; - char state_string[12] = { '\0' }; - - ACPI_FUNCTION_TRACE("acpi_processor_write_throttling"); - - if (!pr || (count > sizeof(state_string) - 1)) - return_VALUE(-EINVAL); - - if (copy_from_user(state_string, buffer, count)) - return_VALUE(-EFAULT); - - state_string[count] = '\0'; - - result = acpi_processor_set_throttling(pr, - simple_strtoul(state_string, - NULL, 0)); - if (result) - return_VALUE(result); - - return_VALUE(count); -} - -struct file_operations acpi_processor_throttling_fops = { - .open = acpi_processor_throttling_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; --- 0.99.9.GIT
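A note on the PM-timer arithmetic used by acpi_processor_idle() and ticks_elapsed() in processor_idle.c above: the ACPI PM timer runs at 3.579545 MHz and is either 24 or 32 bits wide (FADT TMR_VAL_EXT), so elapsed time must tolerate a single counter wrap. A minimal user-space model of the two helpers; the constants mirror the driver's, the values in main() are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545	/* Hz: ACPI PM timer rate */

/* Microseconds -> PM-timer ticks, as in US_TO_PM_TIMER_TICKS(). */
static uint32_t us_to_ticks(uint32_t us)
{
	return (us * (PM_TIMER_FREQUENCY / 1000)) / 1000;
}

/* Elapsed ticks between two reads, allowing one wrap of a 24-bit
 * (tmr_val_ext == 0) or 32-bit (tmr_val_ext == 1) counter. */
static uint32_t ticks_elapsed(uint32_t t1, uint32_t t2, int tmr_val_ext)
{
	if (t2 >= t1)
		return t2 - t1;
	else if (!tmr_val_ext)
		return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
	else
		return (0xFFFFFFFF - t1) + t2;
}

int main(void)
{
	printf("100us = %u ticks\n", us_to_ticks(100));	/* ~357 */
	/* a wrap of the 24-bit counter */
	printf("elapsed = %u\n", ticks_elapsed(0x00FFFFF0, 0x10, 0));
	return 0;
}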
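The bus-master check in acpi_processor_idle() keeps a 32-bit sliding window, one bit per jiffy, and demotes out of C3 whenever recorded activity overlaps the per-state bm_history mask. A simplified single-CPU model of that window (the real loop also marks jiffies in which the idle routine was never invoked as busy; that detail is dropped here):

#include <stdint.h>
#include <stdio.h>

static uint32_t bm_activity;	/* bit 0 = the current jiffy */

static void bm_record(int jiffies_passed, int bm_active_now)
{
	if (jiffies_passed > 32)
		jiffies_passed = 32;
	while (jiffies_passed--)
		bm_activity <<= 1;	/* age the history */
	if (bm_active_now)
		bm_activity |= 0x1;	/* mark current jiffy */
}

/* Demote if any activity falls within the history mask. */
static int should_demote(uint32_t bm_history_mask)
{
	return (bm_activity & bm_history_mask) != 0;
}

int main(void)
{
	bm_record(1, 1);	/* DMA seen this jiffy */
	bm_record(4, 0);	/* four quiet jiffies follow */
	printf("demote=%d\n", should_demote(0xFFFFFFFF)); /* 1: within 32 jiffies */
	printf("demote=%d\n", should_demote(0x0000000F)); /* 0: outside 4-jiffy window */
	return 0;
}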
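On SMP with bm_check set, the C3 path above only disables bus-master arbitration once every online CPU is entering C3, tracked through the c3_cpu_count atomic. A sketch of that handshake using C11 atomics in place of the kernel's atomic_t; num_online_cpus and the arb_disabled flag are stand-ins for the register write:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int c3_cpu_count;
static int arb_disabled;
static const int num_online_cpus = 4;	/* hard-coded for illustration */

static void enter_c3(void)
{
	/* atomic_fetch_add returns the old value; +1 gives the new one,
	 * matching the kernel's atomic_inc_return() */
	if (atomic_fetch_add(&c3_cpu_count, 1) + 1 == num_online_cpus)
		arb_disabled = 1;	/* last CPU in: stop DMA arbitration */
}

static void exit_c3(void)
{
	atomic_fetch_sub(&c3_cpu_count, 1);
	arb_disabled = 0;		/* re-enable on the way out */
}

int main(void)
{
	for (int i = 0; i < num_online_cpus; i++)
		enter_c3();
	printf("arb_disabled=%d\n", arb_disabled);	/* 1 */
	exit_c3();
	printf("arb_disabled=%d\n", arb_disabled);	/* 0 */
	return 0;
}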
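The promotion/demotion bookkeeping in acpi_processor_idle() counts consecutive "long" sleeps (promote) and "short" sleeps (demote) against the per-state thresholds installed by acpi_processor_set_power_policy(). A toy version of just that counting, with the bus-master and max_cstate checks omitted:

#include <stdio.h>

struct cx_policy {
	int promote_ticks, promote_count;	/* thresholds */
	int demote_ticks, demote_count;
	int longs, shorts;			/* running counters */
};

/* Returns +1 to promote, -1 to demote, 0 to stay put. */
static int cx_update(struct cx_policy *p, int sleep_ticks)
{
	if (sleep_ticks > p->promote_ticks) {
		p->shorts = 0;
		if (++p->longs >= p->promote_count) {
			p->longs = 0;
			return +1;
		}
	} else if (sleep_ticks < p->demote_ticks) {
		p->longs = 0;
		if (++p->shorts >= p->demote_count) {
			p->shorts = 0;
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	/* C2-like state: promote after 4 long sleeps, demote after 1 short */
	struct cx_policy c2 = { 100, 4, 100, 1, 0, 0 };
	for (int i = 0; i < 4; i++)
		printf("long sleep -> %+d\n", cx_update(&c2, 500));
	printf("short sleep -> %+d\n", cx_update(&c2, 10));
	return 0;
}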
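acpi_processor_get_power_info_cst() above accepts a _CST entry only when it is a 4-element package whose register lives in SYSTEM_IO or FIXED_HARDWARE space, with C2/C3 restricted to SYSTEM_IO. The same checks on a simplified struct; the space-ID values mirror ACPI's, but the struct itself is hypothetical, not the real union acpi_object:

#include <stdio.h>

#define SPACE_SYSTEM_IO      1		/* ACPI_ADR_SPACE_SYSTEM_IO */
#define SPACE_FIXED_HARDWARE 0x7F	/* ACPI_ADR_SPACE_FIXED_HARDWARE */
#define STATE_C1 1
#define STATE_C3 3

struct cst_entry {		/* one 4-element _CST sub-package */
	int is_package;		/* element really is a Package */
	int subcount;		/* must be 4 */
	int space_id;
	int type;		/* C1..C3 */
	int latency, power;
};

static int cst_entry_usable(const struct cst_entry *e)
{
	if (!e->is_package || e->subcount != 4)
		return 0;
	if (e->space_id != SPACE_SYSTEM_IO &&
	    e->space_id != SPACE_FIXED_HARDWARE)
		return 0;
	/* only C1 may come from fixed hardware here */
	if (e->type != STATE_C1 && e->space_id != SPACE_SYSTEM_IO)
		return 0;
	return e->type >= STATE_C1 && e->type <= STATE_C3;
}

int main(void)
{
	struct cst_entry c2 = { 1, 4, SPACE_SYSTEM_IO, 2, 90, 500 };
	struct cst_entry bad = { 1, 4, SPACE_FIXED_HARDWARE, 3, 900, 100 };
	printf("C2 usable: %d\n", cst_entry_usable(&c2));	/* 1 */
	printf("bad usable: %d\n", cst_entry_usable(&bad));	/* 0 */
	return 0;
}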
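For the _PPC handling in processor_perflib.c: state 0 is the fastest P-state, so a platform limit of n means "nothing faster than Pn", and the policy notifier clamps cpufreq to that state's frequency (_PSS reports MHz, cpufreq works in kHz). A sketch over an invented _PSS table:

#include <stdio.h>

struct px { unsigned core_frequency_mhz; };

/* The ceiling the notifier passes to cpufreq_verify_within_limits(). */
static unsigned ppc_max_khz(const struct px *states, unsigned ppc)
{
	return states[ppc].core_frequency_mhz * 1000;
}

int main(void)
{
	const struct px pss[] = { {2000}, {1800}, {1200}, {800} };
	/* platform says "P1 and slower only" */
	printf("max = %u kHz\n", ppc_max_khz(pss, 1));	/* 1800000 */
	return 0;
}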
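The passive-cooling helpers in processor_thermal.c never touch frequencies directly; they move a per-CPU reduction percentage in 20% steps (capped at 60%) and let the cpufreq policy notifier derive the ceiling from it. A single-CPU model of that split:

#include <stdio.h>

static unsigned reduction_pctg;		/* one CPU's worth, for brevity */

static int thermal_increase(void)	/* hotter: slow down */
{
	if (reduction_pctg < 60) {
		reduction_pctg += 20;
		return 0;
	}
	return -1;			/* already at the 60% cap */
}

/* What the CPUFREQ_ADJUST notifier computes from cpuinfo.max_freq. */
static unsigned policy_max(unsigned cpuinfo_max_khz)
{
	return cpuinfo_max_khz * (100 - reduction_pctg) / 100;
}

int main(void)
{
	thermal_increase();
	thermal_increase();			/* now at 40% reduction */
	printf("ceiling = %u kHz\n", policy_max(2000000)); /* 1200000 */
	return 0;
}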
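acpi_processor_set_thermal_limit() orders its steps: on increment, P-states are exhausted before any T-state is applied; on decrement, T-states are undone before P-states are restored. A toy walk over invented index ranges (the real code drives the cpufreq reduction percentage rather than an explicit P index):

#include <stdio.h>

static int px, tx;			/* current limit indexes */
static const int px_max = 3, tx_max = 7;

static void limit_increment(void)	/* getting hotter */
{
	if (px < px_max)
		px++;			/* P-states first... */
	else if (tx < tx_max)
		tx++;			/* ...then throttling */
}

static void limit_decrement(void)	/* cooling back down */
{
	if (tx > 0)
		tx--;			/* throttling off first... */
	else if (px > 0)
		px--;			/* ...then restore P-states */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		limit_increment();
	printf("after heating: P%d T%d\n", px, tx);	/* P3 T2 */
	limit_decrement();
	printf("after cooling: P%d T%d\n", px, tx);	/* P3 T1 */
	return 0;
}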
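The duty-cycle math in acpi_processor_set_throttling(), folded into one pure function: with a duty field of duty_width bits at duty_offset in P_CNT, Tn runs the clock at (state_count - n)/state_count duty, and bit 4 must be cleared before the field changes and set again to enable throttling. The field placement in main() is made up but legal (duty_offset + duty_width <= 4, as the driver requires):

#include <stdint.h>
#include <stdio.h>

#define THROTTLE_ENABLE 0x00000010	/* bit 4 of P_CNT */

static uint32_t throttle_pcnt(uint32_t pcnt, int state,
			      int duty_offset, int duty_width)
{
	uint32_t state_count = 1u << duty_width;
	uint32_t duty_mask = (state_count - 1) << duty_offset;

	pcnt &= ~THROTTLE_ENABLE;	/* disable before changing duty */
	pcnt &= ~duty_mask;		/* clear the old duty_value */
	if (state) {			/* T0 leaves throttling off */
		pcnt |= (state_count - state) << duty_offset;
		pcnt |= THROTTLE_ENABLE;
	}
	return pcnt;
}

int main(void)
{
	/* 3-bit duty field at offset 1: 8 states, T4 = 50% duty */
	printf("P_CNT = 0x%08x\n", throttle_pcnt(0, 4, 1, 3)); /* 0x18 */
	return 0;
}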
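Finally, the T-state table built in acpi_processor_get_throttling_info() is purely linear: with 2^duty_width states, each step costs 1/state_count of performance, stored in tenths of a percent so the division stays exact. For duty_width = 3:

#include <stdio.h>

int main(void)
{
	int state_count = 1 << 3;		/* duty_width = 3 */
	int step = 1000 / state_count;		/* 125 = 12.5% per step */

	for (int i = 0; i < state_count; i++)
		printf("T%d: %d.%d%% throttled\n",
		       i, (step * i) / 10, (step * i) % 10);
	return 0;
}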