linux/drivers/cpufreq/acpi-cpufreq.c
/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include "mperf.h"

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
        UNDEFINED_CAPABLE = 0,
        SYSTEM_INTEL_MSR_CAPABLE,
        SYSTEM_IO_CAPABLE,
};

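/*
 * The P-state value occupies the low 16 bits of IA32_PERF_CTL and
 * IA32_PERF_STATUS; this mask isolates (and, on write, preserves) them.
 */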
#define INTEL_MSR_RANGE         (0xffff)

struct acpi_cpufreq_data {
        struct acpi_processor_performance *acpi_data;
        struct cpufreq_frequency_table *freq_table;
        unsigned int resume;
        unsigned int cpu_feature;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;

static int check_est_cpu(unsigned int cpuid)
{
        struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

        return cpu_has(cpu, X86_FEATURE_EST);
}

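/*
 * Map a raw status value, as read from the ACPI status I/O port, to its
 * frequency-table entry; returns 0 if it matches no known P-state.
 */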
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
        struct acpi_processor_performance *perf;
        int i;

        perf = data->acpi_data;

        for (i = 0; i < perf->state_count; i++) {
                if (value == perf->states[i].status)
                        return data->freq_table[i].frequency;
        }
        return 0;
}

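/*
 * Map an IA32_PERF_STATUS value to its frequency-table entry. Only the
 * low 16 bits are compared; an unknown value falls back to the first
 * table entry.
 */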
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
        int i;
        struct acpi_processor_performance *perf;

        msr &= INTEL_MSR_RANGE;
        perf = data->acpi_data;

        for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                if (msr == perf->states[data->freq_table[i].index].status)
                        return data->freq_table[i].frequency;
        }
        return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                return extract_msr(val, data);
        case SYSTEM_IO_CAPABLE:
                return extract_io(val, data);
        default:
                return 0;
        }
}

struct msr_addr {
        u32 reg;
};

struct io_addr {
        u16 port;
        u8 bit_width;
};

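/*
 * A register access request that may be executed on other CPUs: the
 * register to touch (MSR or I/O port), the CPUs it applies to, and the
 * value read from or to be written to that register.
 */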
struct drv_cmd {
        unsigned int type;
        const struct cpumask *mask;
        union {
                struct msr_addr msr;
                struct io_addr io;
        } addr;
        u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
        struct drv_cmd *cmd = _cmd;
        u32 h;

        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                rdmsr(cmd->addr.msr.reg, cmd->val, h);
                break;
        case SYSTEM_IO_CAPABLE:
                acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
                                &cmd->val,
                                (u32)cmd->addr.io.bit_width);
                break;
        default:
                break;
        }
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
        struct drv_cmd *cmd = _cmd;
        u32 lo, hi;

        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
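                /*
                 * Read-modify-write: replace only the P-state bits and
                 * preserve the reserved upper bits of the MSR.
                 */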
                rdmsr(cmd->addr.msr.reg, lo, hi);
                lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
                wrmsr(cmd->addr.msr.reg, lo, hi);
                break;
        case SYSTEM_IO_CAPABLE:
                acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
                                cmd->val,
                                (u32)cmd->addr.io.bit_width);
                break;
        default:
                break;
        }
}

static void drv_read(struct drv_cmd *cmd)
{
        int err;
        cmd->val = 0;

        err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
        WARN_ON_ONCE(err);      /* smp_call_function_any() was buggy? */
}

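/*
 * Run do_drv_write() on every CPU in cmd->mask. smp_call_function_many()
 * skips the calling CPU, so handle it directly when it is in the mask.
 */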
static void drv_write(struct drv_cmd *cmd)
{
        int this_cpu;

        this_cpu = get_cpu();
        if (cpumask_test_cpu(this_cpu, cmd->mask))
                do_drv_write(cmd);
        smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
        put_cpu();
}

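/*
 * Read the current P-state status value on (any) one CPU of the mask,
 * using the register description of the mask's first CPU.
 */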
static u32 get_cur_val(const struct cpumask *mask)
{
        struct acpi_processor_performance *perf;
        struct drv_cmd cmd;

        if (unlikely(cpumask_empty(mask)))
                return 0;

        switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
                cmd.addr.io.port = perf->control_register.address;
                cmd.addr.io.bit_width = perf->control_register.bit_width;
                break;
        default:
                return 0;
        }

        cmd.mask = mask;
        drv_read(&cmd);

        pr_debug("get_cur_val = %u\n", cmd.val);

        return cmd.val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
        unsigned int freq;
        unsigned int cached_freq;

        pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

        if (unlikely(data == NULL ||
                     data->acpi_data == NULL || data->freq_table == NULL)) {
                return 0;
        }

        cached_freq = data->freq_table[data->acpi_data->state].frequency;
        freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
        if (freq != cached_freq) {
                /*
                 * The dreaded BIOS frequency change behind our back.
                 * Force set the frequency on next target call.
                 */
                data->resume = 1;
        }

        pr_debug("cur freq = %u\n", freq);

        return freq;
}

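/*
 * Poll for the expected frequency for up to ~1 ms (100 reads, 10 us
 * apart). Only used when the acpi_pstate_strict module parameter is set.
 */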
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
                                struct acpi_cpufreq_data *data)
{
        unsigned int cur_freq;
        unsigned int i;

        for (i = 0; i < 100; i++) {
                cur_freq = extract_freq(get_cur_val(mask), data);
                if (cur_freq == freq)
                        return 1;
                udelay(10);
        }
        return 0;
}

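/*
 * Look up the table entry matching target_freq, skip the write if we are
 * already in that P-state (unless a forced write is pending after
 * resume), program the MSR or I/O port on the affected CPUs, and wrap
 * the change in PRE-/POSTCHANGE notifications.
 */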
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                               unsigned int target_freq, unsigned int relation)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
        struct acpi_processor_performance *perf;
        struct cpufreq_freqs freqs;
        struct drv_cmd cmd;
        unsigned int next_state = 0; /* Index into freq_table */
        unsigned int next_perf_state = 0; /* Index into perf table */
        unsigned int i;
        int result = 0;

        pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

        if (unlikely(data == NULL ||
             data->acpi_data == NULL || data->freq_table == NULL)) {
                return -ENODEV;
        }

        perf = data->acpi_data;
        result = cpufreq_frequency_table_target(policy,
                                                data->freq_table,
                                                target_freq,
                                                relation, &next_state);
        if (unlikely(result)) {
                result = -ENODEV;
                goto out;
        }

        next_perf_state = data->freq_table[next_state].index;
        if (perf->state == next_perf_state) {
                if (unlikely(data->resume)) {
                        pr_debug("Called after resume, resetting to P%d\n",
                                next_perf_state);
                        data->resume = 0;
                } else {
                        pr_debug("Already at target state (P%d)\n",
                                next_perf_state);
                        goto out;
                }
        }

        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
                cmd.val = (u32) perf->states[next_perf_state].control;
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                cmd.addr.io.port = perf->control_register.address;
                cmd.addr.io.bit_width = perf->control_register.bit_width;
                cmd.val = (u32) perf->states[next_perf_state].control;
                break;
        default:
                result = -ENODEV;
                goto out;
        }

        /* cpufreq holds the hotplug lock, so we are safe from here on */
        if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
                cmd.mask = policy->cpus;
        else
                cmd.mask = cpumask_of(policy->cpu);

        freqs.old = perf->states[perf->state].core_frequency * 1000;
        freqs.new = data->freq_table[next_state].frequency;
        for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }

        drv_write(&cmd);

        if (acpi_pstate_strict) {
                if (!check_freqs(cmd.mask, freqs.new, data)) {
                        pr_debug("acpi_cpufreq_target failed (%d)\n",
                                policy->cpu);
                        result = -EAGAIN;
                        goto out;
                }
        }

        for_each_cpu(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
        perf->state = next_perf_state;

out:
        return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

        pr_debug("acpi_cpufreq_verify\n");

        return cpufreq_frequency_table_verify(policy, data->freq_table);
}

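/*
 * Pick the P-state closest to the measured cpu_khz: the test
 * 2 * cpu_khz > (freq + freqn) checks whether cpu_khz lies above the
 * midpoint of two adjacent states' frequencies.
 */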
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
        struct acpi_processor_performance *perf = data->acpi_data;

        if (cpu_khz) {
                /* search the closest match to cpu_khz */
                unsigned int i;
                unsigned long freq;
                unsigned long freqn = perf->states[0].core_frequency * 1000;

                for (i = 0; i < (perf->state_count-1); i++) {
                        freq = freqn;
                        freqn = perf->states[i+1].core_frequency * 1000;
                        if ((2 * cpu_khz) > (freqn + freq)) {
                                perf->state = i;
                                return freq;
                        }
                }
                perf->state = perf->state_count-1;
                return freqn;
        } else {
                /* assume CPU is at P0... */
                perf->state = 0;
                return perf->states[0].core_frequency * 1000;
        }
}

static void free_acpi_perf_data(void)
{
        unsigned int i;

        /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
        for_each_possible_cpu(i)
                free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
                                 ->shared_cpu_map);
        free_percpu(acpi_perf_data);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
        unsigned int i;
        pr_debug("acpi_cpufreq_early_init\n");

        acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
        if (!acpi_perf_data) {
                pr_debug("Memory allocation error for acpi_perf_data.\n");
                return -ENOMEM;
        }
        for_each_possible_cpu(i) {
                if (!zalloc_cpumask_var_node(
                        &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
                        GFP_KERNEL, cpu_to_node(i))) {

                        /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
                        free_acpi_perf_data();
                        return -ENOMEM;
                }
        }

        /* Do initialization in ACPI core */
        acpi_processor_preregister_performance(acpi_perf_data);
        return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set up in
 * hardware or handled by firmware, without informing the OS. Left
 * undetected, this makes the CPU run at a speed other than the one the
 * OS asked for. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
        bios_with_sw_any_bug = 1;
        return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
        {
                .callback = sw_any_bug_found,
                .ident = "Supermicro Server X6DLP",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
                        DMI_MATCH(DMI_BIOS_VERSION, "080010"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
                },
        },
        { }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
        /* Intel Xeon Processor 7100 Series Specification Update
         * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
         * AL30: A Machine Check Exception (MCE) Occurring during an
         * Enhanced Intel SpeedStep Technology Ratio Change May Cause
         * Both Processor Cores to Lock Up. */
        if (c->x86_vendor == X86_VENDOR_INTEL) {
                if ((c->x86 == 15) &&
                    (c->x86_model == 6) &&
                    (c->x86_mask == 8)) {
                        printk(KERN_INFO "acpi-cpufreq: Intel(R) "
                            "Xeon(R) 7100 Errata AL30, processors may "
                            "lock up on frequency changes: disabling "
                            "acpi-cpufreq.\n");
                        return -ENODEV;
                }
        }
        return 0;
}
#endif


static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        unsigned int i;
        unsigned int valid_states = 0;
        unsigned int cpu = policy->cpu;
        struct acpi_cpufreq_data *data;
        int result = 0;
        struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
        struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
        static int blacklisted;
#endif

        pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
        if (blacklisted)
                return blacklisted;
        blacklisted = acpi_cpufreq_blacklist(c);
        if (blacklisted)
                return blacklisted;
#endif

        data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
        per_cpu(acfreq_data, cpu) = data;

        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
                acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

        result = acpi_processor_register_performance(data->acpi_data, cpu);
        if (result)
                goto err_free;

        perf = data->acpi_data;
        policy->shared_type = perf->shared_type;

        /*
         * Will let policy->cpus know about dependency only when software
         * coordination is required.
         */
        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
            policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                cpumask_copy(policy->cpus, perf->shared_cpu_map);
        }
        cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
        dmi_check_system(sw_any_bug_dmi_table);
        if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                cpumask_copy(policy->cpus, cpu_core_mask(cpu));
        }
#endif

        /* capability check */
        if (perf->state_count <= 1) {
                pr_debug("No P-States\n");
                result = -ENODEV;
                goto err_unreg;
        }

        if (perf->control_register.space_id != perf->status_register.space_id) {
                result = -ENODEV;
                goto err_unreg;
        }

        switch (perf->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                pr_debug("SYSTEM IO addr space\n");
                data->cpu_feature = SYSTEM_IO_CAPABLE;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                pr_debug("HARDWARE addr space\n");
                if (!check_est_cpu(cpu)) {
                        result = -ENODEV;
                        goto err_unreg;
                }
                data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
                break;
        default:
                pr_debug("Unknown addr space %d\n",
                        (u32) (perf->control_register.space_id));
                result = -ENODEV;
                goto err_unreg;
        }

        data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
                    (perf->state_count+1), GFP_KERNEL);
        if (!data->freq_table) {
                result = -ENOMEM;
                goto err_unreg;
        }

        /* detect transition latency */
        policy->cpuinfo.transition_latency = 0;
        for (i = 0; i < perf->state_count; i++) {
                if ((perf->states[i].transition_latency * 1000) >
                    policy->cpuinfo.transition_latency)
                        policy->cpuinfo.transition_latency =
                            perf->states[i].transition_latency * 1000;
        }

        /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
        if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
            policy->cpuinfo.transition_latency > 20 * 1000) {
                policy->cpuinfo.transition_latency = 20 * 1000;
                printk_once(KERN_INFO
                            "P-state transition latency capped at 20 uS\n");
        }

        /* table init */
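        /*
         * Only states with strictly decreasing frequency are copied;
         * duplicate or non-monotonic BIOS entries are skipped.
         */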
        for (i = 0; i < perf->state_count; i++) {
                if (i > 0 && perf->states[i].core_frequency >=
                    data->freq_table[valid_states-1].frequency / 1000)
                        continue;

                data->freq_table[valid_states].index = i;
                data->freq_table[valid_states].frequency =
                    perf->states[i].core_frequency * 1000;
                valid_states++;
        }
        data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
        perf->state = 0;

        result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
        if (result)
                goto err_freqfree;

        if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
                printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

        switch (perf->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                /* Current speed is unknown and not detectable by IO port */
                policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
                policy->cur = get_cur_freq_on_cpu(cpu);
                break;
        default:
                break;
        }

        /* notify BIOS that we exist */
        acpi_processor_notify_smm(THIS_MODULE);

        /* Check for APERF/MPERF support in hardware */
        if (boot_cpu_has(X86_FEATURE_APERFMPERF))
                acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

        pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
        for (i = 0; i < perf->state_count; i++)
                pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
                        (i == perf->state ? '*' : ' '), i,
                        (u32) perf->states[i].core_frequency,
                        (u32) perf->states[i].power,
                        (u32) perf->states[i].transition_latency);

        cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

        /*
         * the first call to ->target() should result in us actually
         * writing something to the appropriate registers.
         */
        data->resume = 1;

        return result;

err_freqfree:
        kfree(data->freq_table);
err_unreg:
        acpi_processor_unregister_performance(perf, cpu);
err_free:
        kfree(data);
        per_cpu(acfreq_data, cpu) = NULL;

        return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

        pr_debug("acpi_cpufreq_cpu_exit\n");

        if (data) {
                cpufreq_frequency_table_put_attr(policy->cpu);
                per_cpu(acfreq_data, policy->cpu) = NULL;
                acpi_processor_unregister_performance(data->acpi_data,
                                                      policy->cpu);
                kfree(data->freq_table);
                kfree(data);
        }

        return 0;
}

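/*
 * After a suspend/resume cycle the BIOS may have changed the P-state
 * behind our back, so force a register write on the next ->target() call.
 */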
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

        pr_debug("acpi_cpufreq_resume\n");

        data->resume = 1;

        return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

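/*
 * ->get is filled in at init time for FIXED_HARDWARE (MSR) systems,
 * where the current frequency can be read back; ->getavg is set when
 * APERF/MPERF is available.
 */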
static struct cpufreq_driver acpi_cpufreq_driver = {
        .verify         = acpi_cpufreq_verify,
        .target         = acpi_cpufreq_target,
        .bios_limit     = acpi_processor_get_bios_limit,
        .init           = acpi_cpufreq_cpu_init,
        .exit           = acpi_cpufreq_cpu_exit,
        .resume         = acpi_cpufreq_resume,
        .name           = "acpi-cpufreq",
        .owner          = THIS_MODULE,
        .attr           = acpi_cpufreq_attr,
};

static int __init acpi_cpufreq_init(void)
{
        int ret;

        if (acpi_disabled)
                return 0;

        pr_debug("acpi_cpufreq_init\n");

        ret = acpi_cpufreq_early_init();
        if (ret)
                return ret;

        ret = cpufreq_register_driver(&acpi_cpufreq_driver);
        if (ret)
                free_acpi_perf_data();

        return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
        pr_debug("acpi_cpufreq_exit\n");

        cpufreq_unregister_driver(&acpi_cpufreq_driver);

        free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
        "value 0 or non-zero. non-zero -> strict ACPI checks are "
        "performed during frequency changes.");
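/*
 * Usage sketch: strict checking can be enabled at load time, e.g.
 *     modprobe acpi-cpufreq acpi_pstate_strict=1
 * or, since the parameter is mode 0644, toggled at run time via
 *     /sys/module/acpi_cpufreq/parameters/acpi_pstate_strict
 */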
 769
 770late_initcall(acpi_cpufreq_init);
 771module_exit(acpi_cpufreq_exit);
 772
 773MODULE_ALIAS("acpi");
 774