linux/drivers/hwmon/coretemp.c
/*
 * coretemp.c - Linux kernel module for hardware monitoring
 *
 * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
 *
 * Inspired from many hwmon drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>

#define DRVNAME "coretemp"

/*
 * force_tjmax only matters when TjMax can't be read from the CPU itself.
 * When set, it replaces the driver's suboptimal heuristic.
 */
static int force_tjmax;
module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");

#define PKG_SYSFS_ATTR_NO       1       /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO      2       /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES          128     /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH    19      /* String Length of attrs */
#define MAX_CORE_ATTRS          4       /* Maximum no of basic attrs */
#define TOTAL_ATTRS             (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA           (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)

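/*
 * A core's sysfs attribute number is its core ID plus BASE_SYSFS_ATTR_NO;
 * attribute number PKG_SYSFS_ATTR_NO (temp1_*) is reserved for the package
 * temperature, if available.
 */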
#define TO_CORE_ID(cpu)         (cpu_data(cpu).cpu_core_id)
#define TO_ATTR_NO(cpu)         (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)

#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) \
        for_each_cpu(i, topology_sibling_cpumask(cpu))
#else
#define for_each_sibling(i, cpu)        for (i = 0; false; )
#endif

/*
 * Per-Core Temperature Data
 * @last_updated: The time, in jiffies, when the current temperature value
 *              was last updated.
 * @cpu_core_id: The CPU Core from which temperature values should be read.
 *              This value is passed as "id" field to rdmsr/wrmsr functions.
 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
 *              from where the temperature values should be read.
 * @attr_size:  Total number of per-core attrs displayed in sysfs.
 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
 *              Otherwise, temp_data holds coretemp data.
 * @valid: If this is 1, the current temperature is valid.
 */
struct temp_data {
        int temp;
        int ttarget;
        int tjmax;
        unsigned long last_updated;
        unsigned int cpu;
        u32 cpu_core_id;
        u32 status_reg;
        int attr_size;
        bool is_pkg_data;
        bool valid;
        struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
        char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
        struct attribute *attrs[TOTAL_ATTRS + 1];
        struct attribute_group attr_group;
        struct mutex update_lock;
};

/* Platform Data per Physical CPU */
struct platform_data {
        struct device           *hwmon_dev;
        u16                     pkg_id;
        struct cpumask          cpumask;
        struct temp_data        *core_data[MAX_CORE_DATA];
        struct device_attribute name_attr;
};

/* Keep track of how many package pointers we allocated in init() */
static int max_packages __read_mostly;
/* Array of package pointers. Serialized by cpu hotplug lock */
static struct platform_device **pkg_devices;

static ssize_t show_label(struct device *dev,
                                struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = pdata->core_data[attr->index];

        if (tdata->is_pkg_data)
                return sprintf(buf, "Package id %u\n", pdata->pkg_id);

        return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}

static ssize_t show_crit_alarm(struct device *dev,
                                struct device_attribute *devattr, char *buf)
{
        u32 eax, edx;
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = pdata->core_data[attr->index];

        mutex_lock(&tdata->update_lock);
        rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
        mutex_unlock(&tdata->update_lock);

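        /*
         * Bit 5 of the thermal status MSR is the sticky critical
         * temperature log bit.
         */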
        return sprintf(buf, "%d\n", (eax >> 5) & 1);
}

static ssize_t show_tjmax(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tjmax);
}

static ssize_t show_ttarget(struct device *dev,
                                struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}

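/*
 * Readings are cached for up to a second (HZ jiffies) so that repeated
 * sysfs reads do not re-read the status MSR every time.
 */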
static ssize_t show_temp(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        u32 eax, edx;
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = pdata->core_data[attr->index];

        mutex_lock(&tdata->update_lock);

        /* Check whether the time interval has elapsed */
        if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
                rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
                /*
                 * Ignore the valid bit. In all observed cases the register
                 * value is either low or zero if the valid bit is 0.
                 * Return it instead of reporting an error which doesn't
                 * really help at all.
                 */
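                /* Bits 22:16 hold the readout in degrees C below TjMax */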
                tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
                tdata->valid = 1;
                tdata->last_updated = jiffies;
        }

        mutex_unlock(&tdata->update_lock);
        return sprintf(buf, "%d\n", tdata->temp);
}

struct tjmax_pci {
        unsigned int device;
        int tjmax;
};

static const struct tjmax_pci tjmax_pci_table[] = {
        { 0x0708, 110000 },     /* CE41x0 (Sodaville) */
        { 0x0c72, 102000 },     /* Atom S1240 (Centerton) */
        { 0x0c73, 95000 },      /* Atom S1220 (Centerton) */
        { 0x0c75, 95000 },      /* Atom S1260 (Centerton) */
};

struct tjmax {
        char const *id;
        int tjmax;
};

static const struct tjmax tjmax_table[] = {
        { "CPU  230", 100000 },         /* Model 0x1c, stepping 2       */
        { "CPU  330", 125000 },         /* Model 0x1c, stepping 2       */
};

struct tjmax_model {
        u8 model;
        u8 mask;
        int tjmax;
};

#define ANY 0xff

static const struct tjmax_model tjmax_model_table[] = {
        { 0x1c, 10, 100000 },   /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
        { 0x1c, ANY, 90000 },   /* Z5xx, N2xx, possibly others
                                 * Note: Also matches 230 and 330,
                                 * which are covered by tjmax_table
                                 */
        { 0x26, ANY, 90000 },   /* Atom Tunnel Creek (Exx), Lincroft (Z6xx)
                                 * Note: TjMax for E6xxT is 110C, but CPU type
                                 * is undetectable by software
                                 */
        { 0x27, ANY, 90000 },   /* Atom Medfield (Z2460) */
        { 0x35, ANY, 90000 },   /* Atom Clover Trail/Cloverview (Z27x0) */
        { 0x36, ANY, 100000 },  /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
                                 * Also matches S12x0 (stepping 9), covered by
                                 * PCI table
                                 */
};

static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
        /* 100 degrees C is the default for both mobile and non-mobile CPUs */

        int tjmax = 100000;
        int tjmax_ee = 85000;
        int usemsr_ee = 1;
        int err;
        u32 eax, edx;
        int i;
        struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));

        /*
         * Explicit tjmax table entries override heuristics.
         * First try PCI host bridge IDs, followed by model ID strings
         * and model/stepping information.
         */
        if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
                for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
                        if (host_bridge->device == tjmax_pci_table[i].device) {
                                pci_dev_put(host_bridge);
                                return tjmax_pci_table[i].tjmax;
                        }
                }
        }
        /* Drop the reference taken by pci_get_bus_and_slot() */
        pci_dev_put(host_bridge);

        for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
                if (strstr(c->x86_model_id, tjmax_table[i].id))
                        return tjmax_table[i].tjmax;
        }

        for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
                const struct tjmax_model *tm = &tjmax_model_table[i];
                if (c->x86_model == tm->model &&
                    (tm->mask == ANY || c->x86_mask == tm->mask))
                        return tm->tjmax;
        }

        /* Early chips have no MSR for TjMax */

        if (c->x86_model == 0xf && c->x86_mask < 4)
                usemsr_ee = 0;

        if (c->x86_model > 0xe && usemsr_ee) {
                u8 platform_id;

                /*
                 * Now we can detect the mobile CPU using Intel provided table
                 * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
                 * For Core2 cores, check MSR 0x17: bit 28 set means mobile CPU
                 */
                err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
                if (err) {
                        dev_warn(dev,
                                 "Unable to access MSR 0x17, assuming desktop CPU\n");
                        usemsr_ee = 0;
                } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
                        /*
                         * Trust bit 28 up to Penryn, I could not find any
                         * documentation on that; if you happen to know
                         * someone at Intel please ask
                         */
                        usemsr_ee = 0;
                } else {
                        /* Platform ID bits 52:50 (EDX starts at bit 32) */
                        platform_id = (edx >> 18) & 0x7;

                        /*
                         * Mobile Penryn CPU seems to be platform ID 7 or 5
                         * (guesswork)
                         */
                        if (c->x86_model == 0x17 &&
                            (platform_id == 5 || platform_id == 7)) {
                                /*
                                 * If the MSR EE bit is set, TjMax is
                                 * 90 degrees C, otherwise 105 degrees C.
                                 */
                                tjmax_ee = 90000;
                                tjmax = 105000;
                        }
                }
        }

        if (usemsr_ee) {
                err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
                if (err) {
                        dev_warn(dev,
                                 "Unable to access MSR 0xEE, TjMax left at default\n");
                } else if (eax & 0x40000000) {
                        tjmax = tjmax_ee;
                }
        } else if (tjmax == 100000) {
                /*
                 * If we don't use the EE MSR, we are a desktop CPU
                 * (with the exception of Atom)
                 */
                dev_warn(dev, "Using relative temperature scale!\n");
        }

        return tjmax;
}

static bool cpu_has_tjmax(struct cpuinfo_x86 *c)
{
        u8 model = c->x86_model;

        return model > 0xe &&
               model != 0x1c &&
               model != 0x26 &&
               model != 0x27 &&
               model != 0x35 &&
               model != 0x36;
}

static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
        int err;
        u32 eax, edx;
        u32 val;

        /*
         * On newer Intel processors, the IA32_TEMPERATURE_TARGET MSR
         * contains the TjMax value.
         */
        err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (err) {
                if (cpu_has_tjmax(c))
                        dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
        } else {
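                /* TjMax is encoded in bits 23:16 of IA32_TEMPERATURE_TARGET */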
                val = (eax >> 16) & 0xff;
                /*
                 * If the TjMax is not plausible, an assumption
                 * will be used
                 */
                if (val) {
                        dev_dbg(dev, "TjMax is %d degrees C\n", val);
                        return val * 1000;
                }
        }

        if (force_tjmax) {
                dev_notice(dev, "TjMax forced to %d degrees C by user\n",
                           force_tjmax);
                return force_tjmax * 1000;
        }

        /*
         * An assumption is made for early CPUs and unreadable MSR.
         * NOTE: the calculated value may not be correct.
         */
        return adjust_tjmax(c, id, dev);
}

static int create_core_attrs(struct temp_data *tdata, struct device *dev,
                             int attr_no)
{
        int i;
        static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
                        struct device_attribute *devattr, char *buf) = {
                        show_label, show_crit_alarm, show_temp, show_tjmax,
                        show_ttarget };
        static const char *const suffixes[TOTAL_ATTRS] = {
                "label", "crit_alarm", "input", "crit", "max"
        };

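        /*
         * Create one read-only tempX_<suffix> attribute per entry, where X is
         * attr_no and the show callback is taken from rd_ptr[].
         */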
        for (i = 0; i < tdata->attr_size; i++) {
                snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
                         "temp%d_%s", attr_no, suffixes[i]);
                sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
                tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
                tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
                tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
                tdata->sd_attrs[i].index = attr_no;
                tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
        }
        tdata->attr_group.attrs = tdata->attrs;
        return sysfs_create_group(&dev->kobj, &tdata->attr_group);
}

static int chk_ucode_version(unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        /*
         * Check whether we are affected by erratum AE18 of Core processors:
         * readings might stop updating when the processor visits too deep a
         * sleep state; fixed for stepping D0 (6EC).
         */
        if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
                pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
                return -ENODEV;
        }
        return 0;
}

static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
        int pkgid = topology_logical_package_id(cpu);

        if (pkgid >= 0 && pkgid < max_packages)
                return pkg_devices[pkgid];
        return NULL;
}

static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
{
        struct temp_data *tdata;

        tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
        if (!tdata)
                return NULL;

        tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
                                                        MSR_IA32_THERM_STATUS;
        tdata->is_pkg_data = pkg_flag;
        tdata->cpu = cpu;
        tdata->cpu_core_id = TO_CORE_ID(cpu);
        tdata->attr_size = MAX_CORE_ATTRS;
        mutex_init(&tdata->update_lock);
        return tdata;
}

static int create_core_data(struct platform_device *pdev, unsigned int cpu,
                            int pkg_flag)
{
        struct temp_data *tdata;
        struct platform_data *pdata = platform_get_drvdata(pdev);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 eax, edx;
        int err, attr_no;

        /*
         * Find attr number for sysfs:
         * We map the attr number to core id of the CPU
         * The attr number is always core id + 2
         * The Pkgtemp will always show up as temp1_*, if available
         */
        attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);

        if (attr_no > MAX_CORE_DATA - 1)
                return -ERANGE;

        tdata = init_temp_data(cpu, pkg_flag);
        if (!tdata)
                return -ENOMEM;

        /* Test if we can access the status register */
        err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
        if (err)
                goto exit_free;

        /* We can access status register. Get Critical Temperature */
        tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);

        /*
         * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET.
         * The target temperature is available on older CPUs but not in this
         * register. Atoms don't have the register at all.
         */
        if (c->x86_model > 0xe && c->x86_model != 0x1c) {
                err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
                                        &eax, &edx);
                if (!err) {
                        tdata->ttarget
                          = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
                        tdata->attr_size++;
                }
        }

        pdata->core_data[attr_no] = tdata;

        /* Create sysfs interfaces */
        err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
        if (err)
                goto exit_free;

        return 0;
exit_free:
        pdata->core_data[attr_no] = NULL;
        kfree(tdata);
        return err;
}

static void
coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
{
        if (create_core_data(pdev, cpu, pkg_flag))
                dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}

static void coretemp_remove_core(struct platform_data *pdata, int indx)
{
        struct temp_data *tdata = pdata->core_data[indx];

        /* Remove the sysfs attributes */
        sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);

        kfree(pdata->core_data[indx]);
        pdata->core_data[indx] = NULL;
}

static int coretemp_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct platform_data *pdata;

        /* Initialize the per-package data structures */
        pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;

        pdata->pkg_id = pdev->id;
        platform_set_drvdata(pdev, pdata);

        pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
                                                                  pdata, NULL);
        return PTR_ERR_OR_ZERO(pdata->hwmon_dev);
}

static int coretemp_remove(struct platform_device *pdev)
{
        struct platform_data *pdata = platform_get_drvdata(pdev);
        int i;

        for (i = MAX_CORE_DATA - 1; i >= 0; --i)
                if (pdata->core_data[i])
                        coretemp_remove_core(pdata, i);

        return 0;
}

static struct platform_driver coretemp_driver = {
        .driver = {
                .name = DRVNAME,
        },
        .probe = coretemp_probe,
        .remove = coretemp_remove,
};

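/*
 * One platform device is registered per physical package; its id is the
 * logical package id of the CPUs it covers.
 */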
static struct platform_device *coretemp_device_add(unsigned int cpu)
{
        int err, pkgid = topology_logical_package_id(cpu);
        struct platform_device *pdev;

        if (pkgid < 0)
                return ERR_PTR(-ENOMEM);

        pdev = platform_device_alloc(DRVNAME, pkgid);
        if (!pdev)
                return ERR_PTR(-ENOMEM);

        err = platform_device_add(pdev);
        if (err) {
                platform_device_put(pdev);
                return ERR_PTR(err);
        }

        pkg_devices[pkgid] = pdev;
        return pdev;
}

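/*
 * CPU hotplug callback, invoked when a CPU comes online. The first CPU of a
 * package creates the package device and, if supported, the pkgtemp
 * interface; each core then adds its own interface unless a thread sibling
 * has already done so.
 */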
static int coretemp_cpu_online(unsigned int cpu)
{
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct platform_data *pdata;

        /*
         * Don't execute this on resume as the offline callback did
         * not get executed on suspend.
         */
        if (cpuhp_tasks_frozen)
                return 0;

        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
         * sensors. We check only this bit; all early CPUs without
         * thermal sensors are filtered out.
         */
        if (!cpu_has(c, X86_FEATURE_DTHERM))
                return -ENODEV;

        if (!pdev) {
                /* Check the microcode version of the CPU */
                if (chk_ucode_version(cpu))
                        return -EINVAL;

                /*
                 * Alright, we have DTS support.
                 * We are bringing the _first_ core in this pkg
                 * online. So, initialize per-pkg data structures and
                 * then bring this core online.
                 */
                pdev = coretemp_device_add(cpu);
                if (IS_ERR(pdev))
                        return PTR_ERR(pdev);

                /*
                 * Check whether pkgtemp support is available.
                 * If so, add interfaces for pkgtemp.
                 */
                if (cpu_has(c, X86_FEATURE_PTS))
                        coretemp_add_core(pdev, cpu, 1);
        }

        pdata = platform_get_drvdata(pdev);
        /*
         * Check whether a thread sibling is already online. If not, add the
         * interface for this CPU core.
         */
        if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
                coretemp_add_core(pdev, cpu, 0);

        cpumask_set_cpu(cpu, &pdata->cpumask);
        return 0;
}

static int coretemp_cpu_offline(unsigned int cpu)
{
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        struct platform_data *pd;
        struct temp_data *tdata;
        int indx, target;

        /*
         * Don't execute this on suspend as the device remove locks
         * up the machine.
         */
        if (cpuhp_tasks_frozen)
                return 0;

        /* If the physical CPU device does not exist, just return */
        if (!pdev)
                return 0;

        /* If the core id is too big, just return */
        indx = TO_ATTR_NO(cpu);
        if (indx > MAX_CORE_DATA - 1)
                return 0;

        pd = platform_get_drvdata(pdev);
        tdata = pd->core_data[indx];

        cpumask_clear_cpu(cpu, &pd->cpumask);

        /*
         * If this is the last thread sibling, remove the CPU core
         * interface. If there is still a sibling online, transfer the
         * target cpu of that core interface to it.
         */
        target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
        if (target >= nr_cpu_ids) {
                coretemp_remove_core(pd, indx);
        } else if (tdata && tdata->cpu == cpu) {
                mutex_lock(&tdata->update_lock);
                tdata->cpu = target;
                mutex_unlock(&tdata->update_lock);
        }

        /*
         * If all cores in this pkg are offline, remove the device. This
         * will invoke the platform driver remove function, which cleans up
         * the rest.
         */
        if (cpumask_empty(&pd->cpumask)) {
                pkg_devices[topology_logical_package_id(cpu)] = NULL;
                platform_device_unregister(pdev);
                return 0;
        }

        /*
         * Check whether this core is the target for the package
         * interface. We need to assign it to some other cpu.
         */
        tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
        if (tdata && tdata->cpu == cpu) {
                target = cpumask_first(&pd->cpumask);
                mutex_lock(&tdata->update_lock);
                tdata->cpu = target;
                mutex_unlock(&tdata->update_lock);
        }
        return 0;
}

static const struct x86_cpu_id __initconst coretemp_ids[] = {
        { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
        {}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);

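/* Dynamic hotplug state returned by cpuhp_setup_state() in coretemp_init() */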
static enum cpuhp_state coretemp_hp_online;

static int __init coretemp_init(void)
{
        int err;

        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
         * sensors. We check only this bit; all early CPUs without
         * thermal sensors are filtered out.
         */
        if (!x86_match_cpu(coretemp_ids))
                return -ENODEV;

        max_packages = topology_max_packages();
        pkg_devices = kzalloc(max_packages * sizeof(struct platform_device *),
                              GFP_KERNEL);
        if (!pkg_devices)
                return -ENOMEM;

        err = platform_driver_register(&coretemp_driver);
        if (err)
                return err;

        err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
                                coretemp_cpu_online, coretemp_cpu_offline);
        if (err < 0)
                goto outdrv;
        coretemp_hp_online = err;
        return 0;

outdrv:
        platform_driver_unregister(&coretemp_driver);
        kfree(pkg_devices);
        return err;
}
module_init(coretemp_init)

static void __exit coretemp_exit(void)
{
        cpuhp_remove_state(coretemp_hp_online);
        platform_driver_unregister(&coretemp_driver);
        kfree(pkg_devices);
}
module_exit(coretemp_exit)

MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");