linux/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include "mperf.h"

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
                "acpi-cpufreq", msg)

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
        UNDEFINED_CAPABLE = 0,
        SYSTEM_INTEL_MSR_CAPABLE,
        SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE         (0xffff)

struct acpi_cpufreq_data {
        struct acpi_processor_performance *acpi_data;
        struct cpufreq_frequency_table *freq_table;
        unsigned int resume;
        unsigned int cpu_feature;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;

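/* Does this CPU advertise Enhanced Intel SpeedStep (EST)? */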
static int check_est_cpu(unsigned int cpuid)
{
        struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

        return cpu_has(cpu, X86_FEATURE_EST);
}

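/*
 * Map a status value read from the ACPI I/O port to the matching entry
 * in the driver's frequency table; returns 0 if no P-state matches.
 */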
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
        struct acpi_processor_performance *perf;
        int i;

        perf = data->acpi_data;

        for (i = 0; i < perf->state_count; i++) {
                if (value == perf->states[i].status)
                        return data->freq_table[i].frequency;
        }
        return 0;
}

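/*
 * Map a PERF_STATUS MSR value to a table frequency. Only the low 16
 * bits encode the P-state; an unrecognized value falls back to the
 * first table entry.
 */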
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
        int i;
        struct acpi_processor_performance *perf;

        msr &= INTEL_MSR_RANGE;
        perf = data->acpi_data;

        for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                if (msr == perf->states[data->freq_table[i].index].status)
                        return data->freq_table[i].frequency;
        }
        return data->freq_table[0].frequency;
}

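/* Decode a raw status value via MSR or I/O rules, per CPU capability. */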
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                return extract_msr(val, data);
        case SYSTEM_IO_CAPABLE:
                return extract_io(val, data);
        default:
                return 0;
        }
}

struct msr_addr {
        u32 reg;
};

struct io_addr {
        u16 port;
        u8 bit_width;
};

struct drv_cmd {
        unsigned int type;
        const struct cpumask *mask;
        union {
                struct msr_addr msr;
                struct io_addr io;
        } addr;
        u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
        struct drv_cmd *cmd = _cmd;
        u32 h;

        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                rdmsr(cmd->addr.msr.reg, cmd->val, h);
                break;
        case SYSTEM_IO_CAPABLE:
                acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
                                &cmd->val,
                                (u32)cmd->addr.io.bit_width);
                break;
        default:
                break;
        }
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
        struct drv_cmd *cmd = _cmd;
        u32 lo, hi;

        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                rdmsr(cmd->addr.msr.reg, lo, hi);
                lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
                wrmsr(cmd->addr.msr.reg, lo, hi);
                break;
        case SYSTEM_IO_CAPABLE:
                acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
                                cmd->val,
                                (u32)cmd->addr.io.bit_width);
                break;
        default:
                break;
        }
}

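/*
 * Read the current P-state value on one CPU of the mask. The access
 * must execute on a CPU in the mask, hence smp_call_function_any().
 */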
static void drv_read(struct drv_cmd *cmd)
{
        int err;
        cmd->val = 0;

        err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
        WARN_ON_ONCE(err);      /* smp_call_function_any() was buggy? */
}

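/*
 * Write the new P-state value on all CPUs in the mask. The current CPU
 * is written directly, since smp_call_function_many() skips the caller.
 */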
static void drv_write(struct drv_cmd *cmd)
{
        int this_cpu;

        this_cpu = get_cpu();
        if (cpumask_test_cpu(this_cpu, cmd->mask))
                do_drv_write(cmd);
        smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
        put_cpu();
}

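/*
 * Read the raw P-state status (PERF_STATUS MSR or ACPI I/O port) on a
 * CPU in the mask; returns 0 if the mask is empty or the capability
 * type is unknown.
 */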
static u32 get_cur_val(const struct cpumask *mask)
{
        struct acpi_processor_performance *perf;
        struct drv_cmd cmd;

        if (unlikely(cpumask_empty(mask)))
                return 0;

        switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
                cmd.addr.io.port = perf->control_register.address;
                cmd.addr.io.bit_width = perf->control_register.bit_width;
                break;
        default:
                return 0;
        }

        cmd.mask = mask;
        drv_read(&cmd);

        dprintk("get_cur_val = %u\n", cmd.val);

        return cmd.val;
}

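/*
 * ->get() callback: report the current frequency of a CPU in kHz. A
 * mismatch with the cached P-state means the BIOS changed the frequency
 * behind our back, so force a register write on the next target call.
 */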
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
        unsigned int freq;
        unsigned int cached_freq;

        dprintk("get_cur_freq_on_cpu (%d)\n", cpu);

        if (unlikely(data == NULL ||
                     data->acpi_data == NULL || data->freq_table == NULL)) {
                return 0;
        }

        cached_freq = data->freq_table[data->acpi_data->state].frequency;
        freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
        if (freq != cached_freq) {
                /*
                 * The dreaded BIOS frequency change behind our back.
                 * Force set the frequency on next target call.
                 */
                data->resume = 1;
        }

        dprintk("cur freq = %u\n", freq);

        return freq;
}

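/*
 * Poll the hardware (up to 100 reads, 10us apart) until it reports the
 * requested frequency; used only when acpi_pstate_strict is set.
 */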
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
                                struct acpi_cpufreq_data *data)
{
        unsigned int cur_freq;
        unsigned int i;

        for (i = 0; i < 100; i++) {
                cur_freq = extract_freq(get_cur_val(mask), data);
                if (cur_freq == freq)
                        return 1;
                udelay(10);
        }
        return 0;
}

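/*
 * ->target() callback: move the policy's CPUs to the frequency-table
 * entry matching target_freq, with PRE-/POSTCHANGE notifications
 * around the register write.
 */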
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                               unsigned int target_freq, unsigned int relation)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
        struct acpi_processor_performance *perf;
        struct cpufreq_freqs freqs;
        struct drv_cmd cmd;
        unsigned int next_state = 0; /* Index into freq_table */
        unsigned int next_perf_state = 0; /* Index into perf table */
        unsigned int i;
        int result = 0;

        dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

        if (unlikely(data == NULL ||
             data->acpi_data == NULL || data->freq_table == NULL)) {
                return -ENODEV;
        }

        perf = data->acpi_data;
        result = cpufreq_frequency_table_target(policy,
                                                data->freq_table,
                                                target_freq,
                                                relation, &next_state);
        if (unlikely(result)) {
                result = -ENODEV;
                goto out;
        }

        next_perf_state = data->freq_table[next_state].index;
        if (perf->state == next_perf_state) {
                if (unlikely(data->resume)) {
                        dprintk("Called after resume, resetting to P%d\n",
                                next_perf_state);
                        data->resume = 0;
                } else {
                        dprintk("Already at target state (P%d)\n",
                                next_perf_state);
                        goto out;
                }
        }

        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
                cmd.val = (u32) perf->states[next_perf_state].control;
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                cmd.addr.io.port = perf->control_register.address;
                cmd.addr.io.bit_width = perf->control_register.bit_width;
                cmd.val = (u32) perf->states[next_perf_state].control;
                break;
        default:
                result = -ENODEV;
                goto out;
        }

        /* cpufreq holds the hotplug lock, so we are safe from here on */
        if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
                cmd.mask = policy->cpus;
        else
                cmd.mask = cpumask_of(policy->cpu);

        freqs.old = perf->states[perf->state].core_frequency * 1000;
        freqs.new = data->freq_table[next_state].frequency;
        for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }

        drv_write(&cmd);

        if (acpi_pstate_strict) {
                if (!check_freqs(cmd.mask, freqs.new, data)) {
                        dprintk("acpi_cpufreq_target failed (%d)\n",
                                policy->cpu);
                        result = -EAGAIN;
                        goto out;
                }
        }

        for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
        perf->state = next_perf_state;

out:
        return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

        dprintk("acpi_cpufreq_verify\n");

        return cpufreq_frequency_table_verify(policy, data->freq_table);
}

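/*
 * The current speed cannot be read back through an I/O port, so guess
 * it: pick the P-state whose frequency is closest to the measured
 * cpu_khz, or assume P0 when cpu_khz is unknown.
 */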
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
        struct acpi_processor_performance *perf = data->acpi_data;

        if (cpu_khz) {
                /* search the closest match to cpu_khz */
                unsigned int i;
                unsigned long freq;
                unsigned long freqn = perf->states[0].core_frequency * 1000;

                for (i = 0; i < (perf->state_count-1); i++) {
                        freq = freqn;
                        freqn = perf->states[i+1].core_frequency * 1000;
                        if ((2 * cpu_khz) > (freqn + freq)) {
                                perf->state = i;
                                return freq;
                        }
                }
                perf->state = perf->state_count-1;
                return freqn;
        } else {
                /* assume CPU is at P0... */
                perf->state = 0;
                return perf->states[0].core_frequency * 1000;
        }
}

static void free_acpi_perf_data(void)
{
        unsigned int i;

        /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
        for_each_possible_cpu(i)
                free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
                                 ->shared_cpu_map);
        free_percpu(acpi_perf_data);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. Evaluating
 * _PDC and _PSD here lets us discover the processor dependencies before
 * the actual per-CPU init that happens later.
 */
static int __init acpi_cpufreq_early_init(void)
{
        unsigned int i;
        dprintk("acpi_cpufreq_early_init\n");

        acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
        if (!acpi_perf_data) {
                dprintk("Memory allocation error for acpi_perf_data.\n");
                return -ENOMEM;
        }
        for_each_possible_cpu(i) {
                if (!zalloc_cpumask_var_node(
                        &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
                        GFP_KERNEL, cpu_to_node(i))) {

                        /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
                        free_acpi_perf_data();
                        return -ENOMEM;
                }
        }

        /* Do initialization in ACPI core */
        acpi_processor_preregister_performance(acpi_perf_data);
        return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes perform SW_ANY coordination internally, either setting it
 * up in hardware or handling it in firmware, without informing the OS.
 * Left undetected, this can make a CPU run at a different speed than the
 * one the OS requested. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
        bios_with_sw_any_bug = 1;
        return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
        {
                .callback = sw_any_bug_found,
                .ident = "Supermicro Server X6DLP",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
                        DMI_MATCH(DMI_BIOS_VERSION, "080010"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
                },
        },
        { }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
        /* Intel Xeon Processor 7100 Series Specification Update
         * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
         * AL30: A Machine Check Exception (MCE) Occurring during an
         * Enhanced Intel SpeedStep Technology Ratio Change May Cause
         * Both Processor Cores to Lock Up. */
        if (c->x86_vendor == X86_VENDOR_INTEL) {
                if ((c->x86 == 15) &&
                    (c->x86_model == 6) &&
                    (c->x86_mask == 8)) {
                        printk(KERN_INFO "acpi-cpufreq: Intel(R) "
                            "Xeon(R) 7100 Errata AL30, processors may "
                            "lock up on frequency changes: disabling "
                            "acpi-cpufreq.\n");
                        return -ENODEV;
                }
        }
        return 0;
}
#endif

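/*
 * ->init() callback: register this CPU with the ACPI perf library,
 * build the cpufreq frequency table from the ACPI performance states,
 * and set up shared-policy coordination.
 */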
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        unsigned int i;
        unsigned int valid_states = 0;
        unsigned int cpu = policy->cpu;
        struct acpi_cpufreq_data *data;
        unsigned int result = 0;
        struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
        struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
        static int blacklisted;
#endif

        dprintk("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
        if (blacklisted)
                return blacklisted;
        blacklisted = acpi_cpufreq_blacklist(c);
        if (blacklisted)
                return blacklisted;
#endif

        data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
        per_cpu(acfreq_data, cpu) = data;

        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
                acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

        result = acpi_processor_register_performance(data->acpi_data, cpu);
        if (result)
                goto err_free;

        perf = data->acpi_data;
        policy->shared_type = perf->shared_type;

        /*
         * Will let policy->cpus know about dependency only when software
         * coordination is required.
         */
        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
            policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                cpumask_copy(policy->cpus, perf->shared_cpu_map);
        }
        cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
        dmi_check_system(sw_any_bug_dmi_table);
        if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                cpumask_copy(policy->cpus, cpu_core_mask(cpu));
        }
#endif

        /* capability check */
        if (perf->state_count <= 1) {
                dprintk("No P-States\n");
                result = -ENODEV;
                goto err_unreg;
        }

        if (perf->control_register.space_id != perf->status_register.space_id) {
                result = -ENODEV;
                goto err_unreg;
        }

        switch (perf->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                dprintk("SYSTEM IO addr space\n");
                data->cpu_feature = SYSTEM_IO_CAPABLE;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                dprintk("HARDWARE addr space\n");
                if (!check_est_cpu(cpu)) {
                        result = -ENODEV;
                        goto err_unreg;
                }
                data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
                break;
        default:
                dprintk("Unknown addr space %d\n",
                        (u32) (perf->control_register.space_id));
                result = -ENODEV;
                goto err_unreg;
        }

        data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
                    (perf->state_count+1), GFP_KERNEL);
        if (!data->freq_table) {
                result = -ENOMEM;
                goto err_unreg;
        }

        /* detect transition latency */
        policy->cpuinfo.transition_latency = 0;
        for (i = 0; i < perf->state_count; i++) {
                if ((perf->states[i].transition_latency * 1000) >
                    policy->cpuinfo.transition_latency)
                        policy->cpuinfo.transition_latency =
                            perf->states[i].transition_latency * 1000;
        }

        /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
        if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
            policy->cpuinfo.transition_latency > 20 * 1000) {
                policy->cpuinfo.transition_latency = 20 * 1000;
                printk_once(KERN_INFO
                            "P-state transition latency capped at 20 uS\n");
        }

        /* table init */
        for (i = 0; i < perf->state_count; i++) {
                if (i > 0 && perf->states[i].core_frequency >=
                    data->freq_table[valid_states-1].frequency / 1000)
                        continue;

                data->freq_table[valid_states].index = i;
                data->freq_table[valid_states].frequency =
                    perf->states[i].core_frequency * 1000;
                valid_states++;
        }
        data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
        perf->state = 0;

        result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
        if (result)
                goto err_freqfree;

        if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
                printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

        switch (perf->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                /* Current speed is unknown and not detectable by IO port */
                policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
                policy->cur = get_cur_freq_on_cpu(cpu);
                break;
        default:
                break;
        }

        /* notify BIOS that we exist */
        acpi_processor_notify_smm(THIS_MODULE);

        /* Check for APERF/MPERF support in hardware */
        if (cpu_has(c, X86_FEATURE_APERFMPERF))
                acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

        dprintk("CPU%u - ACPI performance management activated.\n", cpu);
        for (i = 0; i < perf->state_count; i++)
                dprintk("     %cP%d: %d MHz, %d mW, %d uS\n",
                        (i == perf->state ? '*' : ' '), i,
                        (u32) perf->states[i].core_frequency,
                        (u32) perf->states[i].power,
                        (u32) perf->states[i].transition_latency);

        cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

        /*
         * the first call to ->target() should result in us actually
         * writing something to the appropriate registers.
         */
        data->resume = 1;

        return result;

err_freqfree:
        kfree(data->freq_table);
err_unreg:
        acpi_processor_unregister_performance(perf, cpu);
err_free:
        kfree(data);
        per_cpu(acfreq_data, cpu) = NULL;

        return result;
}

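/* ->exit() callback: undo cpu_init() and release per-CPU state. */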
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

        dprintk("acpi_cpufreq_cpu_exit\n");

        if (data) {
                cpufreq_frequency_table_put_attr(policy->cpu);
                per_cpu(acfreq_data, policy->cpu) = NULL;
                acpi_processor_unregister_performance(data->acpi_data,
                                                      policy->cpu);
                kfree(data->freq_table);
                kfree(data);
        }

        return 0;
}

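/* ->resume() callback: force a register write on the next target call. */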
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

        dprintk("acpi_cpufreq_resume\n");

        data->resume = 1;

        return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
        .verify         = acpi_cpufreq_verify,
        .target         = acpi_cpufreq_target,
        .bios_limit     = acpi_processor_get_bios_limit,
        .init           = acpi_cpufreq_cpu_init,
        .exit           = acpi_cpufreq_cpu_exit,
        .resume         = acpi_cpufreq_resume,
        .name           = "acpi-cpufreq",
        .owner          = THIS_MODULE,
        .attr           = acpi_cpufreq_attr,
};

static int __init acpi_cpufreq_init(void)
{
        int ret;

        if (acpi_disabled)
                return 0;

        dprintk("acpi_cpufreq_init\n");

        ret = acpi_cpufreq_early_init();
        if (ret)
                return ret;

        ret = cpufreq_register_driver(&acpi_cpufreq_driver);
        if (ret)
                free_acpi_perf_data();

        return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
        dprintk("acpi_cpufreq_exit\n");

        cpufreq_unregister_driver(&acpi_cpufreq_driver);

        free_percpu(acpi_perf_data);
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
        "value 0 or non-zero. non-zero -> strict ACPI checks are "
        "performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("acpi");