linux/drivers/acpi/acpi_processor.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * acpi_processor.c - ACPI processor enumeration support
   4 *
   5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
   6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   7 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
   8 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   9 * Copyright (C) 2013, Intel Corporation
  10 *                     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  11 */
  12
  13#include <linux/acpi.h>
  14#include <linux/device.h>
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/pci.h>
  18
  19#include <acpi/processor.h>
  20
  21#include <asm/cpu.h>
  22
  23#include "internal.h"
  24
  25DEFINE_PER_CPU(struct acpi_processor *, processors);
  26EXPORT_PER_CPU_SYMBOL(processors);
  27
  28/* Errata Handling */
  29struct acpi_processor_errata errata __read_mostly;
  30EXPORT_SYMBOL_GPL(errata);
  31
  32static int acpi_processor_errata_piix4(struct pci_dev *dev)
  33{
  34        u8 value1 = 0;
  35        u8 value2 = 0;
  36
  37
  38        if (!dev)
  39                return -EINVAL;
  40
  41        /*
  42         * Note that 'dev' references the PIIX4 ACPI Controller.
  43         */
  44
  45        switch (dev->revision) {
  46        case 0:
  47                dev_dbg(&dev->dev, "Found PIIX4 A-step\n");
  48                break;
  49        case 1:
  50                dev_dbg(&dev->dev, "Found PIIX4 B-step\n");
  51                break;
  52        case 2:
  53                dev_dbg(&dev->dev, "Found PIIX4E\n");
  54                break;
  55        case 3:
  56                dev_dbg(&dev->dev, "Found PIIX4M\n");
  57                break;
  58        default:
  59                dev_dbg(&dev->dev, "Found unknown PIIX4\n");
  60                break;
  61        }
  62
  63        switch (dev->revision) {
  64
  65        case 0:         /* PIIX4 A-step */
  66        case 1:         /* PIIX4 B-step */
  67                /*
  68                 * See specification changes #13 ("Manual Throttle Duty Cycle")
  69                 * and #14 ("Enabling and Disabling Manual Throttle"), plus
  70                 * erratum #5 ("STPCLK# Deassertion Time") from the January
   71                 * 2002 PIIX4 specification update.  Applies only to the older
   72                 * PIIX4 models.
  73                 */
  74                errata.piix4.throttle = 1;
  75                fallthrough;
  76
  77        case 2:         /* PIIX4E */
  78        case 3:         /* PIIX4M */
  79                /*
  80                 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
  81                 * Livelock") from the January 2002 PIIX4 specification update.
  82                 * Applies to all PIIX4 models.
  83                 */
  84
  85                /*
  86                 * BM-IDE
  87                 * ------
  88                 * Find the PIIX4 IDE Controller and get the Bus Master IDE
  89                 * Status register address.  We'll use this later to read
  90                 * each IDE controller's DMA status to make sure we catch all
  91                 * DMA activity.
  92                 */
  93                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
  94                                     PCI_DEVICE_ID_INTEL_82371AB,
  95                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
  96                if (dev) {
  97                        errata.piix4.bmisx = pci_resource_start(dev, 4);
  98                        pci_dev_put(dev);
  99                }
 100
 101                /*
 102                 * Type-F DMA
 103                 * ----------
 104                 * Find the PIIX4 ISA Controller and read the Motherboard
 105                 * DMA controller's status to see if Type-F (Fast) DMA mode
 106                 * is enabled (bit 7) on either channel.  Note that we'll
 107                 * disable C3 support if this is enabled, as some legacy
 108                 * devices won't operate well if fast DMA is disabled.
 109                 */
 110                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
 111                                     PCI_DEVICE_ID_INTEL_82371AB_0,
 112                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
 113                if (dev) {
 114                        pci_read_config_byte(dev, 0x76, &value1);
 115                        pci_read_config_byte(dev, 0x77, &value2);
 116                        if ((value1 & 0x80) || (value2 & 0x80))
 117                                errata.piix4.fdma = 1;
 118                        pci_dev_put(dev);
 119                }
 120
 121                break;
 122        }
 123
 124        if (errata.piix4.bmisx)
 125                dev_dbg(&dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n");
 126        if (errata.piix4.fdma)
 127                dev_dbg(&dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n");
 128
 129        return 0;
 130}
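
/*
 * A hedged sketch of how the flags recorded above might be consulted by the
 * idle/throttling code (piix4_avoid_c3() is a hypothetical helper, not part
 * of this file; the real consumers live elsewhere in the processor driver):
 *
 *        static bool piix4_avoid_c3(void)
 *        {
 *                // Erratum #18: Type-F DMA can livelock with C3, so skip C3.
 *                return errata.piix4.fdma != 0;
 *        }
 */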
 131
 132static int acpi_processor_errata(void)
 133{
 134        int result = 0;
 135        struct pci_dev *dev = NULL;
 136
 137        /*
 138         * PIIX4
 139         */
 140        dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
 141                             PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
 142                             PCI_ANY_ID, NULL);
 143        if (dev) {
 144                result = acpi_processor_errata_piix4(dev);
 145                pci_dev_put(dev);
 146        }
 147
 148        return result;
 149}
 150
 151/* Initialization */
 152#ifdef CONFIG_ACPI_HOTPLUG_CPU
 153int __weak acpi_map_cpu(acpi_handle handle,
 154                phys_cpuid_t physid, u32 acpi_id, int *pcpu)
 155{
 156        return -ENODEV;
 157}
 158
 159int __weak acpi_unmap_cpu(int cpu)
 160{
 161        return -ENODEV;
 162}
 163
 164int __weak arch_register_cpu(int cpu)
 165{
 166        return -ENODEV;
 167}
 168
 169void __weak arch_unregister_cpu(int cpu) {}
 170
 171static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 172{
 173        unsigned long long sta;
 174        acpi_status status;
 175        int ret;
 176
 177        if (invalid_phys_cpuid(pr->phys_id))
 178                return -ENODEV;
 179
 180        status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
 181        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
 182                return -ENODEV;
 183
 184        cpu_maps_update_begin();
 185        cpus_write_lock();
 186
 187        ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
 188        if (ret)
 189                goto out;
 190
 191        ret = arch_register_cpu(pr->id);
 192        if (ret) {
 193                acpi_unmap_cpu(pr->id);
 194                goto out;
 195        }
 196
 197        /*
 198         * CPU got hot-added, but cpu_data is not initialized yet.  Set a flag
 199         * to delay cpu_idle/throttling initialization and do it when the CPU
 200         * gets online for the first time.
 201         */
 202        pr_info("CPU%d has been hot-added\n", pr->id);
 203        pr->flags.need_hotplug_init = 1;
 204
 205out:
 206        cpus_write_unlock();
 207        cpu_maps_update_done();
 208        return ret;
 209}
 210#else
 211static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
 212{
 213        return -ENODEV;
 214}
 215#endif /* CONFIG_ACPI_HOTPLUG_CPU */
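
/*
 * A minimal sketch of the other half of the need_hotplug_init handshake set
 * up above: a CPU-online callback (example_cpu_online() is a hypothetical
 * name, not defined in this file) that performs the idle/throttling setup
 * deferred by acpi_processor_hotadd_init().
 *
 *        static int example_cpu_online(unsigned int cpu)
 *        {
 *                struct acpi_processor *pr = per_cpu(processors, cpu);
 *
 *                if (pr && pr->flags.need_hotplug_init) {
 *                        // cpu_data(cpu) is valid now, so the deferred
 *                        // initialization can run before clearing the flag.
 *                        pr->flags.need_hotplug_init = 0;
 *                }
 *                return 0;
 *        }
 */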
 216
 217static int acpi_processor_get_info(struct acpi_device *device)
 218{
 219        union acpi_object object = { 0 };
 220        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
 221        struct acpi_processor *pr = acpi_driver_data(device);
 222        int device_declaration = 0;
 223        acpi_status status = AE_OK;
 224        static int cpu0_initialized;
 225        unsigned long long value;
 226
 227        acpi_processor_errata();
 228
 229        /*
 230         * Check to see if we have bus mastering arbitration control.  This
 231         * is required for proper C3 usage (to maintain cache coherency).
 232         */
 233        if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
 234                pr->flags.bm_control = 1;
 235                dev_dbg(&device->dev, "Bus mastering arbitration control present\n");
 236        } else
 237                dev_dbg(&device->dev, "No bus mastering arbitration control\n");
 238
 239        if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
 240                /* Declared with "Processor" statement; match ProcessorID */
 241                status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
 242                if (ACPI_FAILURE(status)) {
 243                        dev_err(&device->dev,
 244                                "Failed to evaluate processor object (0x%x)\n",
 245                                status);
 246                        return -ENODEV;
 247                }
 248
 249                pr->acpi_id = object.processor.proc_id;
 250        } else {
 251                /*
 252                 * Declared with "Device" statement; match _UID.
 253                 */
 254                status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
 255                                                NULL, &value);
 256                if (ACPI_FAILURE(status)) {
 257                        dev_err(&device->dev,
 258                                "Failed to evaluate processor _UID (0x%x)\n",
 259                                status);
 260                        return -ENODEV;
 261                }
 262                device_declaration = 1;
 263                pr->acpi_id = value;
 264        }
 265
 266        if (acpi_duplicate_processor_id(pr->acpi_id)) {
 267                if (pr->acpi_id == 0xff)
 268                        dev_info_once(&device->dev,
 269                                "Entry not well-defined, consider updating BIOS\n");
 270                else
 271                        dev_err(&device->dev,
 272                                "Failed to get unique processor _UID (0x%x)\n",
 273                                pr->acpi_id);
 274                return -ENODEV;
 275        }
 276
 277        pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
 278                                        pr->acpi_id);
 279        if (invalid_phys_cpuid(pr->phys_id))
 280                dev_dbg(&device->dev, "Failed to get CPU physical ID.\n");
 281
 282        pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
 283        if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
 284                cpu0_initialized = 1;
 285                /*
 286                 * Handle UP system running SMP kernel, with no CPU
 287                 * entry in MADT
 288                 */
 289                if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1))
 290                        pr->id = 0;
 291        }
 292
 293        /*
 294         *  Extra Processor objects may be enumerated on MP systems with
 295         *  less than the max # of CPUs. They should be ignored _iff
 296         *  they are physically not present.
 297         *
 298         *  NOTE: Even if the processor has a cpuid, it may not be present
 299         *  because cpuid <-> apicid mapping is persistent now.
 300         */
 301        if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
 302                int ret = acpi_processor_hotadd_init(pr);
 303
 304                if (ret)
 305                        return ret;
 306        }
 307
 308        /*
  309         * On some systems, several processors share the same processor bus ID
  310         * but are located in different ACPI scopes, for example:
  311         * \_SB.SCK0.CPU0
  312         * \_SB.SCK1.CPU0
  313         * Rename the processor device bus ID so that it is unique.  The new
  314         * bus ID is generated in the following format:
  315         * CPU + logical CPU ID in hex (e.g. "CPU0").
 316         */
 317        sprintf(acpi_device_bid(device), "CPU%X", pr->id);
 318        dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id);
 319
 320        if (!object.processor.pblk_address)
 321                dev_dbg(&device->dev, "No PBLK (NULL address)\n");
 322        else if (object.processor.pblk_length != 6)
 323                dev_err(&device->dev, "Invalid PBLK length [%d]\n",
 324                            object.processor.pblk_length);
 325        else {
 326                pr->throttling.address = object.processor.pblk_address;
 327                pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
 328                pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
 329
 330                pr->pblk = object.processor.pblk_address;
 331        }
 332
 333        /*
 334         * If ACPI describes a slot number for this CPU, we can use it to
 335         * ensure we get the right value in the "physical id" field
 336         * of /proc/cpuinfo
 337         */
 338        status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
 339        if (ACPI_SUCCESS(status))
 340                arch_fix_phys_package_id(pr->id, value);
 341
 342        return 0;
 343}
 344
 345/*
 346 * Do not put anything in here which needs the core to be online.
 347 * For example MSR access or setting up things which check for cpuinfo_x86
 348 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 349 * Such things have to be put in and set up by the processor driver's .probe().
 350 */
 351static DEFINE_PER_CPU(void *, processor_device_array);
 352
 353static int acpi_processor_add(struct acpi_device *device,
 354                                        const struct acpi_device_id *id)
 355{
 356        struct acpi_processor *pr;
 357        struct device *dev;
 358        int result = 0;
 359
 360        pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
 361        if (!pr)
 362                return -ENOMEM;
 363
 364        if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
 365                result = -ENOMEM;
 366                goto err_free_pr;
 367        }
 368
 369        pr->handle = device->handle;
 370        strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
 371        strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
 372        device->driver_data = pr;
 373
 374        result = acpi_processor_get_info(device);
 375        if (result) /* Processor is not physically present or unavailable */
 376                return 0;
 377
 378        BUG_ON(pr->id >= nr_cpu_ids);
 379
 380        /*
 381         * Buggy BIOS check.
 382         * ACPI id of processors can be reported wrongly by the BIOS.
 383         * Don't trust it blindly
 384         */
 385        if (per_cpu(processor_device_array, pr->id) != NULL &&
 386            per_cpu(processor_device_array, pr->id) != device) {
 387                dev_warn(&device->dev,
 388                        "BIOS reported wrong ACPI id %d for the processor\n",
 389                        pr->id);
 390                /* Give up, but do not abort the namespace scan. */
 391                goto err;
 392        }
 393        /*
 394         * processor_device_array is not cleared on errors to allow buggy BIOS
 395         * checks.
 396         */
 397        per_cpu(processor_device_array, pr->id) = device;
 398        per_cpu(processors, pr->id) = pr;
 399
 400        dev = get_cpu_device(pr->id);
 401        if (!dev) {
 402                result = -ENODEV;
 403                goto err;
 404        }
 405
 406        result = acpi_bind_one(dev, device);
 407        if (result)
 408                goto err;
 409
 410        pr->dev = dev;
 411
 412        /* Trigger the processor driver's .probe() if present. */
 413        if (device_attach(dev) >= 0)
 414                return 1;
 415
 416        dev_err(dev, "Processor driver could not be attached\n");
 417        acpi_unbind_one(dev);
 418
 419 err:
 420        free_cpumask_var(pr->throttling.shared_cpu_map);
 421        device->driver_data = NULL;
 422        per_cpu(processors, pr->id) = NULL;
 423 err_free_pr:
 424        kfree(pr);
 425        return result;
 426}
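
/*
 * Once acpi_processor_add() has succeeded, other code can look up the per-CPU
 * object registered above.  A minimal sketch (hypothetical caller):
 *
 *        struct acpi_processor *pr = per_cpu(processors, cpu);
 *
 *        if (pr)
 *                pr_debug("CPU%d maps to ACPI ID %d\n", pr->id, pr->acpi_id);
 */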
 427
 428#ifdef CONFIG_ACPI_HOTPLUG_CPU
 429/* Removal */
 430static void acpi_processor_remove(struct acpi_device *device)
 431{
 432        struct acpi_processor *pr;
 433
 434        if (!device || !acpi_driver_data(device))
 435                return;
 436
 437        pr = acpi_driver_data(device);
 438        if (pr->id >= nr_cpu_ids)
 439                goto out;
 440
 441        /*
 442         * The only reason why we ever get here is CPU hot-removal.  The CPU is
 443         * already offline and the ACPI device removal locking prevents it from
 444         * being put back online at this point.
 445         *
 446         * Unbind the driver from the processor device and detach it from the
 447         * ACPI companion object.
 448         */
 449        device_release_driver(pr->dev);
 450        acpi_unbind_one(pr->dev);
 451
 452        /* Clean up. */
 453        per_cpu(processor_device_array, pr->id) = NULL;
 454        per_cpu(processors, pr->id) = NULL;
 455
 456        cpu_maps_update_begin();
 457        cpus_write_lock();
 458
 459        /* Remove the CPU. */
 460        arch_unregister_cpu(pr->id);
 461        acpi_unmap_cpu(pr->id);
 462
 463        cpus_write_unlock();
 464        cpu_maps_update_done();
 465
 466        try_offline_node(cpu_to_node(pr->id));
 467
 468 out:
 469        free_cpumask_var(pr->throttling.shared_cpu_map);
 470        kfree(pr);
 471}
 472#endif /* CONFIG_ACPI_HOTPLUG_CPU */
 473
 474#ifdef CONFIG_X86
 475static bool acpi_hwp_native_thermal_lvt_set;
 476static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
 477                                                          u32 lvl,
 478                                                          void *context,
 479                                                          void **rv)
 480{
 481        u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
 482        u32 capbuf[2];
 483        struct acpi_osc_context osc_context = {
 484                .uuid_str = sb_uuid_str,
 485                .rev = 1,
 486                .cap.length = 8,
 487                .cap.pointer = capbuf,
 488        };
 489
 490        if (acpi_hwp_native_thermal_lvt_set)
 491                return AE_CTRL_TERMINATE;
 492
 493        capbuf[0] = 0x0000;
 494        capbuf[1] = 0x1000; /* set bit 12 */
 495
 496        if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
 497                if (osc_context.ret.pointer && osc_context.ret.length > 1) {
 498                        u32 *capbuf_ret = osc_context.ret.pointer;
 499
 500                        if (capbuf_ret[1] & 0x1000) {
 501                                acpi_handle_info(handle,
 502                                        "_OSC native thermal LVT Acked\n");
 503                                acpi_hwp_native_thermal_lvt_set = true;
 504                        }
 505                }
 506                kfree(osc_context.ret.pointer);
 507        }
 508
 509        return AE_OK;
 510}
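
/*
 * For reference, the _OSC capabilities buffer used above consists of two
 * DWORDs: capbuf[0] carries the query/status flags and capbuf[1] the
 * capability bits, with bit 12 requesting native handling of the HWP thermal
 * LVT.  A hedged sketch of checking the platform's reply (same shape as the
 * check above):
 *
 *        u32 *ret = osc_context.ret.pointer;
 *
 *        if (osc_context.ret.length >= 2 * sizeof(u32) && (ret[1] & BIT(12)))
 *                pr_debug("Platform acked native thermal LVT handling\n");
 */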
 511
 512void __init acpi_early_processor_osc(void)
 513{
 514        if (boot_cpu_has(X86_FEATURE_HWP)) {
 515                acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
 516                                    ACPI_UINT32_MAX,
 517                                    acpi_hwp_native_thermal_lvt_osc,
 518                                    NULL, NULL, NULL);
 519                acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
 520                                 acpi_hwp_native_thermal_lvt_osc,
 521                                 NULL, NULL);
 522        }
 523}
 524#endif
 525
 526/*
 527 * The following ACPI IDs are known to be suitable for representing as
 528 * processor devices.
 529 */
 530static const struct acpi_device_id processor_device_ids[] = {
 531
 532        { ACPI_PROCESSOR_OBJECT_HID, },
 533        { ACPI_PROCESSOR_DEVICE_HID, },
 534
 535        { }
 536};
 537
 538static struct acpi_scan_handler processor_handler = {
 539        .ids = processor_device_ids,
 540        .attach = acpi_processor_add,
 541#ifdef CONFIG_ACPI_HOTPLUG_CPU
 542        .detach = acpi_processor_remove,
 543#endif
 544        .hotplug = {
 545                .enabled = true,
 546        },
 547};
 548
 549static int acpi_processor_container_attach(struct acpi_device *dev,
 550                                           const struct acpi_device_id *id)
 551{
 552        return 1;
 553}
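
/*
 * Returning a positive value from the attach callback above is enough to tell
 * the ACPI namespace scan that the processor container object is handled by
 * this handler; no per-device setup is needed for containers here.
 */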
 554
 555static const struct acpi_device_id processor_container_ids[] = {
 556        { ACPI_PROCESSOR_CONTAINER_HID, },
 557        { }
 558};
 559
 560static struct acpi_scan_handler processor_container_handler = {
 561        .ids = processor_container_ids,
 562        .attach = acpi_processor_container_attach,
 563};
 564
 565/* The number of the unique processor IDs */
 566static int nr_unique_ids __initdata;
 567
 568/* The number of the duplicate processor IDs */
 569static int nr_duplicate_ids;
 570
 571/* Used to store the unique processor IDs */
 572static int unique_processor_ids[] __initdata = {
 573        [0 ... NR_CPUS - 1] = -1,
 574};
 575
 576/* Used to store the duplicate processor IDs */
 577static int duplicate_processor_ids[] = {
 578        [0 ... NR_CPUS - 1] = -1,
 579};
 580
 581static void __init processor_validated_ids_update(int proc_id)
 582{
 583        int i;
 584
  585        if (nr_unique_ids == NR_CPUS || nr_duplicate_ids == NR_CPUS)
 586                return;
 587
 588        /*
  589         * First, compare proc_id against the known duplicate IDs; if it is
  590         * already recorded there, do nothing.
 591         */
 592        for (i = 0; i < nr_duplicate_ids; i++) {
 593                if (duplicate_processor_ids[i] == proc_id)
 594                        return;
 595        }
 596
 597        /*
  598         * Second, compare proc_id against the unique IDs; if it is found
  599         * there, record it as a duplicate ID.
 600         */
 601        for (i = 0; i < nr_unique_ids; i++) {
 602                if (unique_processor_ids[i] == proc_id) {
 603                        duplicate_processor_ids[nr_duplicate_ids] = proc_id;
 604                        nr_duplicate_ids++;
 605                        return;
 606                }
 607        }
 608
 609        /*
  610         * Otherwise, proc_id is a new unique ID; record it in the unique IDs.
 611         */
 612        unique_processor_ids[nr_unique_ids] = proc_id;
 613        nr_unique_ids++;
 614}
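
/*
 * Worked example of the bookkeeping above: walking processor IDs
 * 0, 1, 1, 2, 1 leaves unique_processor_ids = {0, 1, 2} and
 * duplicate_processor_ids = {1}; later sightings of an ID that is already
 * recorded as a duplicate are ignored.
 */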
 615
 616static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
 617                                                  u32 lvl,
 618                                                  void *context,
 619                                                  void **rv)
 620{
 621        acpi_status status;
 622        acpi_object_type acpi_type;
 623        unsigned long long uid;
 624        union acpi_object object = { 0 };
 625        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
 626
 627        status = acpi_get_type(handle, &acpi_type);
 628        if (ACPI_FAILURE(status))
 629                return status;
 630
 631        switch (acpi_type) {
 632        case ACPI_TYPE_PROCESSOR:
 633                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
 634                if (ACPI_FAILURE(status))
 635                        goto err;
 636                uid = object.processor.proc_id;
 637                break;
 638
 639        case ACPI_TYPE_DEVICE:
 640                status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
 641                if (ACPI_FAILURE(status))
 642                        goto err;
 643                break;
 644        default:
 645                goto err;
 646        }
 647
 648        processor_validated_ids_update(uid);
 649        return AE_OK;
 650
 651err:
 652        /* Exit on error, but don't abort the namespace walk */
 653        acpi_handle_info(handle, "Invalid processor object\n");
 654        return AE_OK;
 655
 656}
 657
 658static void __init acpi_processor_check_duplicates(void)
 659{
 660        /* check the correctness for all processors in ACPI namespace */
 661        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
 662                                                ACPI_UINT32_MAX,
 663                                                acpi_processor_ids_walk,
 664                                                NULL, NULL, NULL);
 665        acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
 666                                                NULL, NULL);
 667}
 668
 669bool acpi_duplicate_processor_id(int proc_id)
 670{
 671        int i;
 672
 673        /*
 674         * compare the proc_id with duplicate IDs, if the proc_id is already
 675         * in the duplicate IDs, return true, otherwise, return false.
 676         */
 677        for (i = 0; i < nr_duplicate_ids; i++) {
 678                if (duplicate_processor_ids[i] == proc_id)
 679                        return true;
 680        }
 681        return false;
 682}
 683
 684void __init acpi_processor_init(void)
 685{
 686        acpi_processor_check_duplicates();
 687        acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
 688        acpi_scan_add_handler(&processor_container_handler);
 689}
 690
 691#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
 692/**
 693 * acpi_processor_claim_cst_control - Request _CST control from the platform.
 694 */
 695bool acpi_processor_claim_cst_control(void)
 696{
 697        static bool cst_control_claimed;
 698        acpi_status status;
 699
 700        if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
 701                return true;
 702
 703        status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
 704                                    acpi_gbl_FADT.cst_control, 8);
 705        if (ACPI_FAILURE(status)) {
 706                pr_warn("ACPI: Failed to claim processor _CST control\n");
 707                return false;
 708        }
 709
 710        cst_control_claimed = true;
 711        return true;
 712}
 713EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
 714
 715/**
 716 * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
 717 * @handle: ACPI handle of the processor object containing the _CST.
 718 * @cpu: The numeric ID of the target CPU.
  719 * @info: Object to write the C-states information into.
 720 *
 721 * Extract the C-state information for the given CPU from the output of the _CST
 722 * control method under the corresponding ACPI processor object (or processor
 723 * device object) and populate @info with it.
 724 *
 725 * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
 726 * acpi_processor_ffh_cstate_probe() to verify them and update the
 727 * cpu_cstate_entry data for @cpu.
 728 */
 729int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
 730                                struct acpi_processor_power *info)
 731{
 732        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 733        union acpi_object *cst;
 734        acpi_status status;
 735        u64 count;
 736        int last_index = 0;
 737        int i, ret = 0;
 738
 739        status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
 740        if (ACPI_FAILURE(status)) {
 741                acpi_handle_debug(handle, "No _CST\n");
 742                return -ENODEV;
 743        }
 744
 745        cst = buffer.pointer;
 746
 747        /* There must be at least 2 elements. */
 748        if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
 749                acpi_handle_warn(handle, "Invalid _CST output\n");
 750                ret = -EFAULT;
 751                goto end;
 752        }
 753
 754        count = cst->package.elements[0].integer.value;
 755
 756        /* Validate the number of C-states. */
 757        if (count < 1 || count != cst->package.count - 1) {
 758                acpi_handle_warn(handle, "Inconsistent _CST data\n");
 759                ret = -EFAULT;
 760                goto end;
 761        }
 762
 763        for (i = 1; i <= count; i++) {
 764                union acpi_object *element;
 765                union acpi_object *obj;
 766                struct acpi_power_register *reg;
 767                struct acpi_processor_cx cx;
 768
 769                /*
 770                 * If there is not enough space for all C-states, skip the
 771                 * excess ones and log a warning.
 772                 */
 773                if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
 774                        acpi_handle_warn(handle,
 775                                         "No room for more idle states (limit: %d)\n",
 776                                         ACPI_PROCESSOR_MAX_POWER - 1);
 777                        break;
 778                }
 779
 780                memset(&cx, 0, sizeof(cx));
 781
 782                element = &cst->package.elements[i];
 783                if (element->type != ACPI_TYPE_PACKAGE) {
 784                        acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n",
 785                                         i, element->type);
 786                        continue;
 787                }
 788
 789                if (element->package.count != 4) {
 790                        acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n",
 791                                         i, element->package.count);
 792                        continue;
 793                }
 794
 795                obj = &element->package.elements[0];
 796
 797                if (obj->type != ACPI_TYPE_BUFFER) {
 798                        acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n",
 799                                         i, obj->type);
 800                        continue;
 801                }
 802
 803                reg = (struct acpi_power_register *)obj->buffer.pointer;
 804
 805                obj = &element->package.elements[1];
 806                if (obj->type != ACPI_TYPE_INTEGER) {
  807                        acpi_handle_info(handle, "_CST C%d package element[1] type(%x) is not integer, skip...\n",
 808                                         i, obj->type);
 809                        continue;
 810                }
 811
 812                cx.type = obj->integer.value;
 813                /*
 814                 * There are known cases in which the _CST output does not
 815                 * contain C1, so if the type of the first state found is not
 816                 * C1, leave an empty slot for C1 to be filled in later.
 817                 */
 818                if (i == 1 && cx.type != ACPI_STATE_C1)
 819                        last_index = 1;
 820
 821                cx.address = reg->address;
 822                cx.index = last_index + 1;
 823
 824                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
 825                        if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
 826                                /*
 827                                 * In the majority of cases _CST describes C1 as
 828                                 * a FIXED_HARDWARE C-state, but if the command
 829                                 * line forbids using MWAIT, use CSTATE_HALT for
 830                                 * C1 regardless.
 831                                 */
 832                                if (cx.type == ACPI_STATE_C1 &&
 833                                    boot_option_idle_override == IDLE_NOMWAIT) {
 834                                        cx.entry_method = ACPI_CSTATE_HALT;
 835                                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 836                                } else {
 837                                        cx.entry_method = ACPI_CSTATE_FFH;
 838                                }
 839                        } else if (cx.type == ACPI_STATE_C1) {
 840                                /*
 841                                 * In the special case of C1, FIXED_HARDWARE can
 842                                 * be handled by executing the HLT instruction.
 843                                 */
 844                                cx.entry_method = ACPI_CSTATE_HALT;
 845                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 846                        } else {
 847                                acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n",
 848                                                 i);
 849                                continue;
 850                        }
 851                } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
 852                        cx.entry_method = ACPI_CSTATE_SYSTEMIO;
 853                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
 854                                 cx.address);
 855                } else {
 856                        acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n",
 857                                         i, reg->space_id);
 858                        continue;
 859                }
 860
 861                if (cx.type == ACPI_STATE_C1)
 862                        cx.valid = 1;
 863
 864                obj = &element->package.elements[2];
 865                if (obj->type != ACPI_TYPE_INTEGER) {
 866                        acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n",
 867                                         i, obj->type);
 868                        continue;
 869                }
 870
 871                cx.latency = obj->integer.value;
 872
 873                obj = &element->package.elements[3];
 874                if (obj->type != ACPI_TYPE_INTEGER) {
 875                        acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n",
 876                                         i, obj->type);
 877                        continue;
 878                }
 879
 880                memcpy(&info->states[++last_index], &cx, sizeof(cx));
 881        }
 882
 883        acpi_handle_info(handle, "Found %d idle states\n", last_index);
 884
 885        info->count = last_index;
 886
 887end:
 888        kfree(buffer.pointer);
 889
 890        return ret;
 891}
 892EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
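
/*
 * A minimal, hypothetical caller pairing the two helpers exported in this
 * block: claim _CST control from the platform first, then evaluate _CST for
 * a CPU and fill its acpi_processor_power structure.  (Illustrative sketch
 * only; the real callers live on the cpuidle side of the processor driver.)
 *
 *        static int example_get_cstates(struct acpi_processor *pr)
 *        {
 *                if (!acpi_processor_claim_cst_control())
 *                        return -ENODEV;
 *
 *                return acpi_processor_evaluate_cst(pr->handle, pr->id,
 *                                                   &pr->power);
 *        }
 */
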
 893#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
 894