linux/drivers/hwmon/coretemp.c
/*
 * coretemp.c - Linux kernel module for hardware monitoring
 *
 * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
 *
 * Inspired by many hwmon drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>

#define DRVNAME "coretemp"

/*
 * force_tjmax only matters when TjMax can't be read from the CPU itself.
 * When set, it replaces the driver's suboptimal heuristic.
 */
static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
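
/*
 * For example, loading the module with "modprobe coretemp tjmax=95" forces
 * a TjMax of 95 degrees C on CPUs where TjMax cannot be read from
 * IA32_TEMPERATURE_TARGET; see get_tjmax() below.
 */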

#define PKG_SYSFS_ATTR_NO       1       /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO      2       /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES          128     /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH    19      /* String Length of attrs */
#define MAX_CORE_ATTRS          4       /* Maximum no of basic attrs */
#define TOTAL_ATTRS             (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA           (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)

#define TO_CORE_ID(cpu)         (cpu_data(cpu).cpu_core_id)
#define TO_ATTR_NO(cpu)         (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)

#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) \
        for_each_cpu(i, topology_sibling_cpumask(cpu))
#else
#define for_each_sibling(i, cpu)        for (i = 0; false; )
#endif

/*
 * Per-Core Temperature Data
 * @last_updated: The time, in jiffies, when the current temperature value
 *              was last updated.
 * @cpu_core_id: The CPU core from which temperature values should be read.
 *              This value is passed as the "id" field to rdmsr/wrmsr functions.
 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
 *              from where the temperature values should be read.
 * @attr_size:  Total number of per-core attrs displayed in sysfs.
 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
 *              Otherwise, temp_data holds coretemp data.
 * @valid: If this is 1, the current temperature is valid.
 */
struct temp_data {
        int temp;
        int ttarget;
        int tjmax;
        unsigned long last_updated;
        unsigned int cpu;
        u32 cpu_core_id;
        u32 status_reg;
        int attr_size;
        bool is_pkg_data;
        bool valid;
        struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
        char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
        struct attribute *attrs[TOTAL_ATTRS + 1];
        struct attribute_group attr_group;
        struct mutex update_lock;
};

/* Platform Data per Physical CPU */
struct platform_data {
        struct device           *hwmon_dev;
        u16                     pkg_id;
        struct cpumask          cpumask;
        struct temp_data        *core_data[MAX_CORE_DATA];
        struct device_attribute name_attr;
};

/* Keep track of how many package pointers we allocated in init() */
static int max_packages __read_mostly;
/* Array of package pointers. Serialized by cpu hotplug lock */
static struct platform_device **pkg_devices;

static ssize_t show_label(struct device *dev,
                                struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = pdata->core_data[attr->index];

        if (tdata->is_pkg_data)
                return sprintf(buf, "Package id %u\n", pdata->pkg_id);

        return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}

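/*
 * tempX_crit_alarm: report whether the critical temperature has been
 * reached, taken from bit 5 of the core/package thermal status MSR.
 */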
static ssize_t show_crit_alarm(struct device *dev,
                                struct device_attribute *devattr, char *buf)
{
        u32 eax, edx;
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = pdata->core_data[attr->index];

        mutex_lock(&tdata->update_lock);
        rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
        mutex_unlock(&tdata->update_lock);

        return sprintf(buf, "%d\n", (eax >> 5) & 1);
}

static ssize_t show_tjmax(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tjmax);
}

static ssize_t show_ttarget(struct device *dev,
                                struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}

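/*
 * tempX_input: the DTS digital readout lives in bits 22:16 of the status
 * MSR and counts degrees below TjMax, so the temperature in millidegrees
 * is tjmax - readout * 1000. The value is cached and refreshed at most
 * once per second (HZ jiffies).
 */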
static ssize_t show_temp(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        u32 eax, edx;
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = pdata->core_data[attr->index];

        mutex_lock(&tdata->update_lock);

        /* Check whether the time interval has elapsed */
        if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
                rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
                /*
                 * Ignore the valid bit. In all observed cases the register
                 * value is either low or zero if the valid bit is 0.
                 * Return it instead of reporting an error which doesn't
                 * really help at all.
                 */
                tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
                tdata->valid = 1;
                tdata->last_updated = jiffies;
        }

        mutex_unlock(&tdata->update_lock);
        return sprintf(buf, "%d\n", tdata->temp);
}

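/*
 * Fallback TjMax values for CPUs that do not report TjMax via
 * IA32_TEMPERATURE_TARGET, keyed by PCI host bridge device ID, by model
 * string, or by model/stepping. adjust_tjmax() consults these tables in
 * that order.
 */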
struct tjmax_pci {
        unsigned int device;
        int tjmax;
};

static const struct tjmax_pci tjmax_pci_table[] = {
        { 0x0708, 110000 },     /* CE41x0 (Sodaville) */
        { 0x0c72, 102000 },     /* Atom S1240 (Centerton) */
        { 0x0c73, 95000 },      /* Atom S1220 (Centerton) */
        { 0x0c75, 95000 },      /* Atom S1260 (Centerton) */
};

struct tjmax {
        char const *id;
        int tjmax;
};

static const struct tjmax tjmax_table[] = {
        { "CPU  230", 100000 },         /* Model 0x1c, stepping 2       */
        { "CPU  330", 125000 },         /* Model 0x1c, stepping 2       */
};

struct tjmax_model {
        u8 model;
        u8 mask;
        int tjmax;
};

#define ANY 0xff

static const struct tjmax_model tjmax_model_table[] = {
        { 0x1c, 10, 100000 },   /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
        { 0x1c, ANY, 90000 },   /* Z5xx, N2xx, possibly others
                                 * Note: Also matches 230 and 330,
                                 * which are covered by tjmax_table
                                 */
        { 0x26, ANY, 90000 },   /* Atom Tunnel Creek (Exx), Lincroft (Z6xx)
                                 * Note: TjMax for E6xxT is 110C, but CPU type
                                 * is undetectable by software
                                 */
        { 0x27, ANY, 90000 },   /* Atom Medfield (Z2460) */
        { 0x35, ANY, 90000 },   /* Atom Clover Trail/Cloverview (Z27x0) */
        { 0x36, ANY, 100000 },  /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
                                 * Also matches S12x0 (stepping 9), covered by
                                 * PCI table
                                 */
};

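/*
 * adjust_tjmax() - estimate TjMax when it cannot be read from the CPU.
 * Checks the PCI, model-string and model/stepping tables above first;
 * otherwise falls back to the MSR 0x17 / MSR 0xEE mobile-CPU heuristic
 * and returns either 85000, 90000, 100000 or 105000 millidegrees C.
 */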
static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
        /* 100 degrees C is the default for both mobile and non-mobile CPUs */

        int tjmax = 100000;
        int tjmax_ee = 85000;
        int usemsr_ee = 1;
        int err;
        u32 eax, edx;
        int i;
        u16 devfn = PCI_DEVFN(0, 0);
        struct pci_dev *host_bridge = pci_get_domain_bus_and_slot(0, 0, devfn);

        /*
         * Explicit tjmax table entries override heuristics.
         * First try PCI host bridge IDs, followed by model ID strings
         * and model/stepping information.
         */
        if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
                for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
                        if (host_bridge->device == tjmax_pci_table[i].device)
                                return tjmax_pci_table[i].tjmax;
                }
        }

        for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
                if (strstr(c->x86_model_id, tjmax_table[i].id))
                        return tjmax_table[i].tjmax;
        }

        for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
                const struct tjmax_model *tm = &tjmax_model_table[i];
                if (c->x86_model == tm->model &&
                    (tm->mask == ANY || c->x86_stepping == tm->mask))
                        return tm->tjmax;
        }

        /* Early chips have no MSR for TjMax */

        if (c->x86_model == 0xf && c->x86_stepping < 4)
                usemsr_ee = 0;

        if (c->x86_model > 0xe && usemsr_ee) {
                u8 platform_id;

                /*
                 * Now we can detect the mobile CPU using the Intel provided
                 * table:
                 * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
                 * For Core2 cores, check MSR 0x17, bit 28: 1 = mobile CPU
                 */
                err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
                if (err) {
                        dev_warn(dev,
                                 "Unable to access MSR 0x17, assuming desktop CPU\n");
                        usemsr_ee = 0;
                } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
                        /*
                         * Trust bit 28 up to Penryn; I could not find any
                         * documentation on that. If you happen to know
                         * someone at Intel, please ask.
                         */
                        usemsr_ee = 0;
                } else {
                        /* Platform ID bits 52:50 (EDX starts at bit 32) */
                        platform_id = (edx >> 18) & 0x7;

                        /*
                         * Mobile Penryn CPU seems to be platform ID 7 or 5
                         * (guesswork)
                         */
                        if (c->x86_model == 0x17 &&
                            (platform_id == 5 || platform_id == 7)) {
                                /*
                                 * If the MSR EE bit is set, use 90 degrees C,
                                 * otherwise 105 degrees C
                                 */
                                tjmax_ee = 90000;
                                tjmax = 105000;
                        }
                }
        }

        if (usemsr_ee) {
                err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
                if (err) {
                        dev_warn(dev,
                                 "Unable to access MSR 0xEE, TjMax left at default\n");
                } else if (eax & 0x40000000) {
                        tjmax = tjmax_ee;
                }
        } else if (tjmax == 100000) {
                /*
                 * If we don't use the EE MSR, we are a desktop CPU
                 * (with the exception of Atom)
                 */
                dev_warn(dev, "Using relative temperature scale!\n");
        }

        return tjmax;
}

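/*
 * cpu_has_tjmax() - true if the CPU is expected to expose TjMax in
 * IA32_TEMPERATURE_TARGET. The excluded Atom models are the ones handled
 * by the fallback tables above, so no warning is printed for them.
 */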
static bool cpu_has_tjmax(struct cpuinfo_x86 *c)
{
        u8 model = c->x86_model;

        return model > 0xe &&
               model != 0x1c &&
               model != 0x26 &&
               model != 0x27 &&
               model != 0x35 &&
               model != 0x36;
}

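/*
 * get_tjmax() - resolve TjMax in millidegrees C. Tries, in order:
 * IA32_TEMPERATURE_TARGET bits 23:16, the "tjmax" module parameter, and
 * finally the adjust_tjmax() heuristic.
 */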
static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
        int err;
        u32 eax, edx;
        u32 val;

        /*
         * On current Intel(R) processors, the IA32_TEMPERATURE_TARGET MSR
         * contains the TjMax value
         */
        err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (err) {
                if (cpu_has_tjmax(c))
                        dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
        } else {
                val = (eax >> 16) & 0xff;
                /*
                 * If the reported TjMax is 0, it is not plausible and an
                 * assumption is used instead (see below)
                 */
                if (val) {
                        dev_dbg(dev, "TjMax is %d degrees C\n", val);
                        return val * 1000;
                }
        }

        if (force_tjmax) {
                dev_notice(dev, "TjMax forced to %d degrees C by user\n",
                           force_tjmax);
                return force_tjmax * 1000;
        }

        /*
         * An assumption is made for early CPUs and unreadable MSR.
         * NOTE: the calculated value may not be correct.
         */
        return adjust_tjmax(c, id, dev);
}

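/*
 * create_core_attrs() - create the sysfs group for one sensor:
 * temp<N>_label, temp<N>_crit_alarm, temp<N>_input, temp<N>_crit and,
 * when the target temperature is readable, temp<N>_max.
 */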
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
                             int attr_no)
{
        int i;
        static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
                        struct device_attribute *devattr, char *buf) = {
                        show_label, show_crit_alarm, show_temp, show_tjmax,
                        show_ttarget };
        static const char *const suffixes[TOTAL_ATTRS] = {
                "label", "crit_alarm", "input", "crit", "max"
        };

        for (i = 0; i < tdata->attr_size; i++) {
                snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
                         "temp%d_%s", attr_no, suffixes[i]);
                sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
                tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
                tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
                tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
                tdata->sd_attrs[i].index = attr_no;
                tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
        }
        tdata->attr_group.attrs = tdata->attrs;
        return sysfs_create_group(&dev->kobj, &tdata->attr_group);
}

static int chk_ucode_version(unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        /*
         * Check if we have the problem with errata AE18 of Core processors:
         * readings might stop updating when the processor visits too deep a
         * sleep state; fixed in stepping D0 (6EC).
         */
        if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
                pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
                return -ENODEV;
        }
        return 0;
}

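/*
 * coretemp_get_pdev() - return the platform device of the physical package
 * that @cpu belongs to, or NULL if none has been registered yet.
 */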
static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
        int pkgid = topology_logical_package_id(cpu);

        if (pkgid >= 0 && pkgid < max_packages)
                return pkg_devices[pkgid];
        return NULL;
}

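/*
 * init_temp_data() - allocate and initialize the temp_data for one core or,
 * if @pkg_flag is set, for the whole package (using the package-level
 * thermal status MSR).
 */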
static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
{
        struct temp_data *tdata;

        tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
        if (!tdata)
                return NULL;

        tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
                                                        MSR_IA32_THERM_STATUS;
        tdata->is_pkg_data = pkg_flag;
        tdata->cpu = cpu;
        tdata->cpu_core_id = TO_CORE_ID(cpu);
        tdata->attr_size = MAX_CORE_ATTRS;
        mutex_init(&tdata->update_lock);
        return tdata;
}

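/*
 * create_core_data() - set up one sensor: verify that the thermal status
 * MSR is readable, determine TjMax and (if available) the target
 * temperature, then create the sysfs attributes on the package's hwmon
 * device.
 */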
static int create_core_data(struct platform_device *pdev, unsigned int cpu,
                            int pkg_flag)
{
        struct temp_data *tdata;
        struct platform_data *pdata = platform_get_drvdata(pdev);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 eax, edx;
        int err, attr_no;

        /*
         * Find attr number for sysfs:
         * We map the attr number to the core id of the CPU; the attr number
         * is always core id + 2. The pkgtemp will always show up as temp1_*,
         * if available.
         */
        attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);

        if (attr_no > MAX_CORE_DATA - 1)
                return -ERANGE;

        tdata = init_temp_data(cpu, pkg_flag);
        if (!tdata)
                return -ENOMEM;

        /* Test if we can access the status register */
        err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
        if (err)
                goto exit_free;

        /* We can access the status register. Get the critical temperature. */
        tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);

        /*
         * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET.
         * The target temperature is available on older CPUs but not in this
         * register. Atoms don't have the register at all.
         */
        if (c->x86_model > 0xe && c->x86_model != 0x1c) {
                err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
                                        &eax, &edx);
                if (!err) {
                        tdata->ttarget
                          = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
                        tdata->attr_size++;
                }
        }

        pdata->core_data[attr_no] = tdata;

        /* Create sysfs interfaces */
        err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
        if (err)
                goto exit_free;

        return 0;
exit_free:
        pdata->core_data[attr_no] = NULL;
        kfree(tdata);
        return err;
}

static void
coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
{
        if (create_core_data(pdev, cpu, pkg_flag))
                dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}

static void coretemp_remove_core(struct platform_data *pdata, int indx)
{
        struct temp_data *tdata = pdata->core_data[indx];

        /* Remove the sysfs attributes */
        sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);

        kfree(pdata->core_data[indx]);
        pdata->core_data[indx] = NULL;
}

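/*
 * coretemp_probe() - called once per physical package; allocate the
 * per-package platform data and register the hwmon device. The per-core
 * attribute groups are added later from the CPU hotplug callbacks.
 */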
static int coretemp_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct platform_data *pdata;

        /* Initialize the per-package data structures */
        pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;

        pdata->pkg_id = pdev->id;
        platform_set_drvdata(pdev, pdata);

        pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
                                                                  pdata, NULL);
        return PTR_ERR_OR_ZERO(pdata->hwmon_dev);
}

static int coretemp_remove(struct platform_device *pdev)
{
        struct platform_data *pdata = platform_get_drvdata(pdev);
        int i;

        for (i = MAX_CORE_DATA - 1; i >= 0; --i)
                if (pdata->core_data[i])
                        coretemp_remove_core(pdata, i);

        return 0;
}

static struct platform_driver coretemp_driver = {
        .driver = {
                .name = DRVNAME,
        },
        .probe = coretemp_probe,
        .remove = coretemp_remove,
};

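/*
 * coretemp_device_add() - register the platform device for the package
 * containing @cpu and record it in pkg_devices, indexed by the logical
 * package id.
 */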
static struct platform_device *coretemp_device_add(unsigned int cpu)
{
        int err, pkgid = topology_logical_package_id(cpu);
        struct platform_device *pdev;

        if (pkgid < 0)
                return ERR_PTR(-ENOMEM);

        pdev = platform_device_alloc(DRVNAME, pkgid);
        if (!pdev)
                return ERR_PTR(-ENOMEM);

        err = platform_device_add(pdev);
        if (err) {
                platform_device_put(pdev);
                return ERR_PTR(err);
        }

        pkg_devices[pkgid] = pdev;
        return pdev;
}

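/*
 * coretemp_cpu_online() - CPU hotplug callback. Creates the package device
 * and the package temperature interface when the first CPU of a package
 * comes online, and the core interface when the first thread of a core
 * comes online.
 */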
static int coretemp_cpu_online(unsigned int cpu)
{
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct platform_data *pdata;

        /*
         * Don't execute this on resume as the offline callback did
         * not get executed on suspend.
         */
        if (cpuhp_tasks_frozen)
                return 0;

        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
         * sensors. We check this bit only; all the early CPUs
         * without thermal sensors will be filtered out.
         */
        if (!cpu_has(c, X86_FEATURE_DTHERM))
                return -ENODEV;

        if (!pdev) {
                /* Check the microcode version of the CPU */
                if (chk_ucode_version(cpu))
                        return -EINVAL;

                /*
                 * Alright, we have DTS support.
                 * We are bringing the _first_ core in this pkg
                 * online. So, initialize per-pkg data structures and
                 * then bring this core online.
                 */
                pdev = coretemp_device_add(cpu);
                if (IS_ERR(pdev))
                        return PTR_ERR(pdev);

                /*
                 * Check whether pkgtemp support is available.
                 * If so, add interfaces for pkgtemp.
                 */
                if (cpu_has(c, X86_FEATURE_PTS))
                        coretemp_add_core(pdev, cpu, 1);
        }

        pdata = platform_get_drvdata(pdev);
        /*
         * Check whether a thread sibling is already online. If not, add the
         * interface for this CPU core.
         */
        if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
                coretemp_add_core(pdev, cpu, 0);

        cpumask_set_cpu(cpu, &pdata->cpumask);
        return 0;
}

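/*
 * coretemp_cpu_offline() - CPU hotplug callback. Removes or migrates the
 * core and package interfaces as the last sibling of a core, or the last
 * CPU of a package, goes offline.
 */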
static int coretemp_cpu_offline(unsigned int cpu)
{
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        struct platform_data *pd;
        struct temp_data *tdata;
        int indx, target;

        /*
         * Don't execute this on suspend as the device remove locks
         * up the machine.
         */
        if (cpuhp_tasks_frozen)
                return 0;

        /* If the physical CPU device does not exist, just return */
        if (!pdev)
                return 0;

        /* The core id is too big, just return */
        indx = TO_ATTR_NO(cpu);
        if (indx > MAX_CORE_DATA - 1)
                return 0;

        pd = platform_get_drvdata(pdev);
        tdata = pd->core_data[indx];

        cpumask_clear_cpu(cpu, &pd->cpumask);

        /*
         * If this is the last thread sibling, remove the CPU core
         * interface. If there is still a sibling online, transfer the
         * target cpu of that core interface to it.
         */
        target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
        if (target >= nr_cpu_ids) {
                coretemp_remove_core(pd, indx);
        } else if (tdata && tdata->cpu == cpu) {
                mutex_lock(&tdata->update_lock);
                tdata->cpu = target;
                mutex_unlock(&tdata->update_lock);
        }

        /*
         * If all cores in this pkg are offline, remove the device. This
         * will invoke the platform driver remove function, which cleans up
         * the rest.
         */
        if (cpumask_empty(&pd->cpumask)) {
                pkg_devices[topology_logical_package_id(cpu)] = NULL;
                platform_device_unregister(pdev);
                return 0;
        }

        /*
         * Check whether this core is the target for the package
         * interface. We need to assign it to some other cpu.
         */
        tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
        if (tdata && tdata->cpu == cpu) {
                target = cpumask_first(&pd->cpumask);
                mutex_lock(&tdata->update_lock);
                tdata->cpu = target;
                mutex_unlock(&tdata->update_lock);
        }
        return 0;
}

static const struct x86_cpu_id __initconst coretemp_ids[] = {
        { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
        {}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);

static enum cpuhp_state coretemp_hp_online;

static int __init coretemp_init(void)
{
        int err;

        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
         * sensors. We check this bit only; all the early CPUs
         * without thermal sensors will be filtered out.
         */
        if (!x86_match_cpu(coretemp_ids))
                return -ENODEV;

        max_packages = topology_max_packages();
        pkg_devices = kzalloc(max_packages * sizeof(struct platform_device *),
                              GFP_KERNEL);
        if (!pkg_devices)
                return -ENOMEM;

        err = platform_driver_register(&coretemp_driver);
        if (err)
                return err;

        err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
                                coretemp_cpu_online, coretemp_cpu_offline);
        if (err < 0)
                goto outdrv;
        coretemp_hp_online = err;
        return 0;

outdrv:
        platform_driver_unregister(&coretemp_driver);
        kfree(pkg_devices);
        return err;
}
module_init(coretemp_init)

static void __exit coretemp_exit(void)
{
        cpuhp_remove_state(coretemp_hp_online);
        platform_driver_unregister(&coretemp_driver);
        kfree(pkg_devices);
}
module_exit(coretemp_exit)

MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");