linux/drivers/thermal/x86_pkg_temp_thermal.c
/*
 * x86_pkg_temp_thermal driver
 * Copyright (c) 2013, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/param.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/thermal.h>
#include <linux/debugfs.h>
#include <asm/cpu_device_id.h>
#include <asm/mce.h>

/*
 * Rate control delay: the idea is to introduce a debounce effect.
 * It should be long enough to avoid a flood of events when a threshold
 * is set to a temperature that is constantly violated, but short enough
 * for user space to take timely action. The action can be removing the
 * threshold or changing it to the next interesting setting. Based on
 * experiments, roughly every 5 seconds under load gives a significant
 * temperature change.
 */
#define PKG_TEMP_THERMAL_NOTIFY_DELAY   5000
static int notify_delay_ms = PKG_TEMP_THERMAL_NOTIFY_DELAY;
module_param(notify_delay_ms, int, 0644);
MODULE_PARM_DESC(notify_delay_ms,
        "User space notification delay in milliseconds.");
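/*
 * The delay is an ordinary module parameter; assuming the module is built
 * under its KBUILD_MODNAME, it can be set at load time (e.g.
 * "modprobe x86_pkg_temp_thermal notify_delay_ms=10000") or changed at run
 * time via /sys/module/x86_pkg_temp_thermal/parameters/notify_delay_ms
 * (mode 0644).
 */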

/*
 * Number of trip points in the thermal zone. Currently it can't be more
 * than 2: the MSR allows setting and getting notifications for only two
 * thresholds. This define enforces that limit even if CPUID returns a
 * wrong value for the number of thresholds.
 */
#define MAX_NUMBER_OF_TRIPS     2
/* Limit number of package temp zones */
#define MAX_PKG_TEMP_ZONE_IDS   256

struct phy_dev_entry {
        struct list_head list;
        u16 phys_proc_id;
        u16 first_cpu;
        u32 tj_max;
        int ref_cnt;
        u32 start_pkg_therm_low;
        u32 start_pkg_therm_high;
        struct thermal_zone_device *tzone;
};

/* List maintaining the per-package instances */
static LIST_HEAD(phy_dev_list);
static DEFINE_MUTEX(phy_dev_list_mutex);

/* Interrupt to work function schedule queue */
static DEFINE_PER_CPU(struct delayed_work, pkg_temp_thermal_threshold_work);

/* To track if the work is already scheduled on a package */
static u8 *pkg_work_scheduled;

/* Spin lock to prevent races with pkg_work_scheduled */
static spinlock_t pkg_work_lock;
static u16 max_phy_id;

/* Debug counters to show using debugfs */
static struct dentry *debugfs;
static unsigned int pkg_interrupt_cnt;
static unsigned int pkg_work_cnt;

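/*
 * Create debug counters under /sys/kernel/debug/pkg_temp_thermal:
 * "pkg_thres_interrupt" counts threshold interrupts seen and
 * "pkg_thres_work" counts deferred work invocations. Failure here is not
 * fatal; the caller ignores the return value.
 */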
static int pkg_temp_debugfs_init(void)
{
        struct dentry *d;

        debugfs = debugfs_create_dir("pkg_temp_thermal", NULL);
        if (!debugfs)
                return -ENOENT;

        d = debugfs_create_u32("pkg_thres_interrupt", S_IRUGO, debugfs,
                                (u32 *)&pkg_interrupt_cnt);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("pkg_thres_work", S_IRUGO, debugfs,
                                (u32 *)&pkg_work_cnt);
        if (!d)
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(debugfs);
        return -ENOENT;
}

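/*
 * Look up the phy_dev_entry whose phys_proc_id matches the package of @cpu.
 * Returns NULL when no entry has been registered for that package yet.
 */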
static struct phy_dev_entry
                        *pkg_temp_thermal_get_phy_entry(unsigned int cpu)
{
        u16 phys_proc_id = topology_physical_package_id(cpu);
        struct phy_dev_entry *phy_ptr;

        mutex_lock(&phy_dev_list_mutex);

        list_for_each_entry(phy_ptr, &phy_dev_list, list)
                if (phy_ptr->phys_proc_id == phys_proc_id) {
                        mutex_unlock(&phy_dev_list_mutex);
                        return phy_ptr;
                }

        mutex_unlock(&phy_dev_list_mutex);

        return NULL;
}

/*
 * tj_max is interesting because thresholds are set relative to this
 * temperature.
 */
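/*
 * tj_max is read from bits 23:16 of MSR_IA32_TEMPERATURE_TARGET (degrees C)
 * and converted to the millidegree units used by the thermal core. A zero
 * value is treated as invalid.
 */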
static int get_tj_max(int cpu, u32 *tj_max)
{
        u32 eax, edx;
        u32 val;
        int err;

        err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (err)
                goto err_ret;
        else {
                val = (eax >> 16) & 0xff;
                if (val)
                        *tj_max = val * 1000;
                else {
                        err = -EINVAL;
                        goto err_ret;
                }
        }

        return 0;
err_ret:
        *tj_max = 0;
        return err;
}

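/*
 * Report the current package temperature in millidegrees C. Bits 22:16 of
 * MSR_IA32_PACKAGE_THERM_STATUS hold the readout as an offset below tj_max;
 * bit 31 is checked before the reading is trusted.
 */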
static int sys_get_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
{
        u32 eax, edx;
        struct phy_dev_entry *phy_dev_entry;

        phy_dev_entry = tzd->devdata;
        rdmsr_on_cpu(phy_dev_entry->first_cpu, MSR_IA32_PACKAGE_THERM_STATUS,
                        &eax, &edx);
        if (eax & 0x80000000) {
                *temp = phy_dev_entry->tj_max -
                                ((eax >> 16) & 0x7f) * 1000;
                pr_debug("sys_get_curr_temp %ld\n", *temp);
                return 0;
        }

        return -EINVAL;
}

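/*
 * Return the temperature programmed for a trip point. Thresholds are
 * stored in MSR_IA32_PACKAGE_THERM_INTERRUPT as offsets below tj_max, so
 * the value is converted back to an absolute temperature; an unset
 * threshold reads back as 0.
 */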
static int sys_get_trip_temp(struct thermal_zone_device *tzd,
                int trip, unsigned long *temp)
{
        u32 eax, edx;
        struct phy_dev_entry *phy_dev_entry;
        u32 mask, shift;
        unsigned long thres_reg_value;
        int ret;

        if (trip >= MAX_NUMBER_OF_TRIPS)
                return -EINVAL;

        phy_dev_entry = tzd->devdata;

        if (trip) {
                mask = THERM_MASK_THRESHOLD1;
                shift = THERM_SHIFT_THRESHOLD1;
        } else {
                mask = THERM_MASK_THRESHOLD0;
                shift = THERM_SHIFT_THRESHOLD0;
        }

        ret = rdmsr_on_cpu(phy_dev_entry->first_cpu,
                                MSR_IA32_PACKAGE_THERM_INTERRUPT, &eax, &edx);
        if (ret < 0)
                return -EINVAL;

        thres_reg_value = (eax & mask) >> shift;
        if (thres_reg_value)
                *temp = phy_dev_entry->tj_max - thres_reg_value * 1000;
        else
                *temp = 0;
        pr_debug("sys_get_trip_temp %ld\n", *temp);

        return 0;
}

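/*
 * Program a trip point. The requested temperature (millidegrees C, below
 * tj_max) is written into the corresponding threshold field of
 * MSR_IA32_PACKAGE_THERM_INTERRUPT and the threshold interrupt is enabled;
 * a temperature of 0 clears the threshold and its interrupt enable bit.
 */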
static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
                                                        unsigned long temp)
{
        u32 l, h;
        struct phy_dev_entry *phy_dev_entry;
        u32 mask, shift, intr;
        int ret;

        phy_dev_entry = tzd->devdata;

        if (trip >= MAX_NUMBER_OF_TRIPS || temp >= phy_dev_entry->tj_max)
                return -EINVAL;

        ret = rdmsr_on_cpu(phy_dev_entry->first_cpu,
                                        MSR_IA32_PACKAGE_THERM_INTERRUPT,
                                        &l, &h);
        if (ret < 0)
                return -EINVAL;

        if (trip) {
                mask = THERM_MASK_THRESHOLD1;
                shift = THERM_SHIFT_THRESHOLD1;
                intr = THERM_INT_THRESHOLD1_ENABLE;
        } else {
                mask = THERM_MASK_THRESHOLD0;
                shift = THERM_SHIFT_THRESHOLD0;
                intr = THERM_INT_THRESHOLD0_ENABLE;
        }
        l &= ~mask;
        /*
         * When user space sets a trip temperature == 0, it is an
         * indication that it is no longer interested in receiving
         * notifications.
         */
        if (!temp)
                l &= ~intr;
        else {
                l |= (phy_dev_entry->tj_max - temp)/1000 << shift;
                l |= intr;
        }

        return wrmsr_on_cpu(phy_dev_entry->first_cpu,
                                        MSR_IA32_PACKAGE_THERM_INTERRUPT,
                                        l, h);
}

static int sys_get_trip_type(struct thermal_zone_device *thermal,
                int trip, enum thermal_trip_type *type)
{

        *type = THERMAL_TRIP_PASSIVE;

        return 0;
}

/* Thermal zone callback registry */
static struct thermal_zone_device_ops tzone_ops = {
        .get_temp = sys_get_curr_temp,
        .get_trip_temp = sys_get_trip_temp,
        .get_trip_type = sys_get_trip_type,
        .set_trip_temp = sys_set_trip_temp,
};

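/*
 * Through these callbacks the thermal core exposes each package as a zone
 * of type "pkg-temp-<package id>" under /sys/class/thermal/, with a "temp"
 * attribute and up to two writable trip_point_N_temp attributes (values in
 * millidegrees C); writing 0 to a trip point disables its notification.
 */
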
static bool pkg_temp_thermal_platform_thermal_rate_control(void)
{
        return true;
}

/* Enable threshold interrupt on local package/cpu */
static inline void enable_pkg_thres_interrupt(void)
{
        u32 l, h;
        u8 thres_0, thres_1;

        rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
        /* only enable/disable if there is a valid threshold value */
        thres_0 = (l & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0;
        thres_1 = (l & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1;
        if (thres_0)
                l |= THERM_INT_THRESHOLD0_ENABLE;
        if (thres_1)
                l |= THERM_INT_THRESHOLD1_ENABLE;
        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}

/* Disable threshold interrupt on local package/cpu */
static inline void disable_pkg_thres_interrupt(void)
{
        u32 l, h;
        rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                        l & (~THERM_INT_THRESHOLD0_ENABLE) &
                                (~THERM_INT_THRESHOLD1_ENABLE), h);
}

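/*
 * Deferred work scheduled by the threshold interrupt, delayed by
 * notify_delay_ms. It clears the per-package "work scheduled" flag,
 * re-enables the threshold interrupts, acknowledges the threshold log bits
 * in MSR_IA32_PACKAGE_THERM_STATUS and, if either threshold was logged,
 * notifies the thermal core via thermal_zone_device_update().
 */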
static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
{
        __u64 msr_val;
        int cpu = smp_processor_id();
        int phy_id = topology_physical_package_id(cpu);
        struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
        bool notify = false;
        unsigned long flags;

        if (!phdev)
                return;

        spin_lock_irqsave(&pkg_work_lock, flags);
        ++pkg_work_cnt;
        if (unlikely(phy_id > max_phy_id)) {
                spin_unlock_irqrestore(&pkg_work_lock, flags);
                return;
        }
        pkg_work_scheduled[phy_id] = 0;
        spin_unlock_irqrestore(&pkg_work_lock, flags);

        enable_pkg_thres_interrupt();
        rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
        if (msr_val & THERM_LOG_THRESHOLD0) {
                wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS,
                                msr_val & ~THERM_LOG_THRESHOLD0);
                notify = true;
        }
        if (msr_val & THERM_LOG_THRESHOLD1) {
                wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS,
                                msr_val & ~THERM_LOG_THRESHOLD1);
                notify = true;
        }
        if (notify) {
                pr_debug("thermal_zone_device_update\n");
                thermal_zone_device_update(phdev->tzone);
        }
}

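/*
 * Called from the package thermal interrupt path via the
 * platform_thermal_package_notify hook. Threshold interrupts are masked
 * and the per-cpu delayed work is scheduled once per package; the work
 * re-enables the interrupts after notify_delay_ms, which provides the rate
 * control advertised by pkg_temp_thermal_platform_thermal_rate_control().
 */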
static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
{
        unsigned long flags;
        int cpu = smp_processor_id();
        int phy_id = topology_physical_package_id(cpu);

        /*
         * When a package is in an interrupted state, all CPUs in that
         * package are in the same interrupt state. So scheduling work on
         * any one CPU in the package is enough; simply return for the
         * others.
         */
        spin_lock_irqsave(&pkg_work_lock, flags);
        ++pkg_interrupt_cnt;
        if (unlikely(phy_id > max_phy_id) || unlikely(!pkg_work_scheduled) ||
                        pkg_work_scheduled[phy_id]) {
                disable_pkg_thres_interrupt();
                spin_unlock_irqrestore(&pkg_work_lock, flags);
                return -EINVAL;
        }
        pkg_work_scheduled[phy_id] = 1;
        spin_unlock_irqrestore(&pkg_work_lock, flags);

        disable_pkg_thres_interrupt();
        schedule_delayed_work_on(cpu,
                                &per_cpu(pkg_temp_thermal_threshold_work, cpu),
                                msecs_to_jiffies(notify_delay_ms));
        return 0;
}

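/*
 * Find another online CPU in the same package as @cpu, used to hand over
 * the "first_cpu" role when that CPU goes offline. Returns 0 if no sibling
 * is left.
 */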
static int find_siblings_cpu(int cpu)
{
        int i;
        int id = topology_physical_package_id(cpu);

        for_each_online_cpu(i)
                if (i != cpu && topology_physical_package_id(i) == id)
                        return i;

        return 0;
}

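/*
 * Register a thermal zone for the package containing @cpu. The number of
 * supported thresholds comes from the low bits of CPUID(6).EBX (clamped to
 * MAX_NUMBER_OF_TRIPS), tj_max is read from the MSR, the pkg_work_scheduled
 * array is grown to cover the new package id, and the initial value of
 * MSR_IA32_PACKAGE_THERM_INTERRUPT is saved so it can be restored on exit.
 */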
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
        int err;
        u32 tj_max;
        struct phy_dev_entry *phy_dev_entry;
        char buffer[30];
        int thres_count;
        u32 eax, ebx, ecx, edx;
        u8 *temp;
        unsigned long flags;

        cpuid(6, &eax, &ebx, &ecx, &edx);
        thres_count = ebx & 0x07;
        if (!thres_count)
                return -ENODEV;

        if (topology_physical_package_id(cpu) > MAX_PKG_TEMP_ZONE_IDS)
                return -ENODEV;

        thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS);

        err = get_tj_max(cpu, &tj_max);
        if (err)
                goto err_ret;

        mutex_lock(&phy_dev_list_mutex);

        phy_dev_entry = kzalloc(sizeof(*phy_dev_entry), GFP_KERNEL);
        if (!phy_dev_entry) {
                err = -ENOMEM;
                goto err_ret_unlock;
        }

        spin_lock_irqsave(&pkg_work_lock, flags);
        if (topology_physical_package_id(cpu) > max_phy_id)
                max_phy_id = topology_physical_package_id(cpu);
        temp = krealloc(pkg_work_scheduled,
                        (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
        if (!temp) {
                spin_unlock_irqrestore(&pkg_work_lock, flags);
                err = -ENOMEM;
                goto err_ret_free;
        }
        pkg_work_scheduled = temp;
        pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
        spin_unlock_irqrestore(&pkg_work_lock, flags);

        phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
        phy_dev_entry->first_cpu = cpu;
        phy_dev_entry->tj_max = tj_max;
        phy_dev_entry->ref_cnt = 1;
        snprintf(buffer, sizeof(buffer), "pkg-temp-%d",
                                        phy_dev_entry->phys_proc_id);
        phy_dev_entry->tzone = thermal_zone_device_register(buffer,
                        thres_count,
                        (thres_count == MAX_NUMBER_OF_TRIPS) ?
                                0x03 : 0x01,
                        phy_dev_entry, &tzone_ops, NULL, 0, 0);
        if (IS_ERR(phy_dev_entry->tzone)) {
                err = PTR_ERR(phy_dev_entry->tzone);
                goto err_ret_free;
        }
        /* Store MSR value for package thermal interrupt, to restore at exit */
        rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
                                &phy_dev_entry->start_pkg_therm_low,
                                &phy_dev_entry->start_pkg_therm_high);

        list_add_tail(&phy_dev_entry->list, &phy_dev_list);
        pr_debug("pkg_temp_thermal_device_add :phy_id %d cpu %d\n",
                        phy_dev_entry->phys_proc_id, cpu);

        mutex_unlock(&phy_dev_list_mutex);

        return 0;

err_ret_free:
        kfree(phy_dev_entry);
err_ret_unlock:
        mutex_unlock(&phy_dev_list_mutex);

err_ret:
        return err;
}

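/*
 * Drop a reference for @cpu's package. If the departing CPU was the one
 * used for MSR access, that role moves to an online sibling; when the last
 * CPU of the package goes away the thermal zone is unregistered and the
 * entry freed.
 */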
static int pkg_temp_thermal_device_remove(unsigned int cpu)
{
        struct phy_dev_entry *n;
        u16 phys_proc_id = topology_physical_package_id(cpu);
        struct phy_dev_entry *phdev =
                        pkg_temp_thermal_get_phy_entry(cpu);

        if (!phdev)
                return -ENODEV;

        mutex_lock(&phy_dev_list_mutex);
        /* If we are losing the first cpu for this package, we need a change */
        if (phdev->first_cpu == cpu) {
                phdev->first_cpu = find_siblings_cpu(cpu);
                pr_debug("thermal_device_remove: first cpu switched %d\n",
                                        phdev->first_cpu);
        }
        /*
         * It is possible that no siblings are left, as this was the last
         * cpu going offline. We don't need to worry about this assignment
         * since in that case the phydev entry is removed and the thermal
         * zone is unregistered.
         */
        --phdev->ref_cnt;
        pr_debug("thermal_device_remove: pkg: %d cpu %d ref_cnt %d\n",
                                        phys_proc_id, cpu, phdev->ref_cnt);
        if (!phdev->ref_cnt)
                list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
                        if (phdev->phys_proc_id == phys_proc_id) {
                                thermal_zone_device_unregister(phdev->tzone);
                                list_del(&phdev->list);
                                kfree(phdev);
                                break;
                        }
                }
        mutex_unlock(&phy_dev_list_mutex);

        return 0;
}

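/*
 * CPU online handler. The first CPU of a package that supports DTHERM and
 * PTS creates the package entry and thermal zone; later CPUs only bump the
 * reference count. The per-cpu delayed work is (re)initialized for every
 * CPU that comes online.
 */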
static int get_core_online(unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);

        /* Check if there is already an instance for this package */
        if (!phdev) {
                if (!cpu_has(c, X86_FEATURE_DTHERM) ||
                                        !cpu_has(c, X86_FEATURE_PTS))
                        return -ENODEV;
                if (pkg_temp_thermal_device_add(cpu))
                        return -ENODEV;
        } else {
                mutex_lock(&phy_dev_list_mutex);
                ++phdev->ref_cnt;
                pr_debug("get_core_online: cpu %d ref_cnt %d\n",
                                                cpu, phdev->ref_cnt);
                mutex_unlock(&phy_dev_list_mutex);
        }
        INIT_DELAYED_WORK(&per_cpu(pkg_temp_thermal_threshold_work, cpu),
                        pkg_temp_thermal_threshold_work_fn);

        pr_debug("get_core_online: cpu %d successful\n", cpu);

        return 0;
}

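/*
 * CPU offline handler: drop the package reference and, if the entry was
 * still present, cancel any delayed work pending on this CPU.
 */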
static void put_core_offline(unsigned int cpu)
{
        if (!pkg_temp_thermal_device_remove(cpu))
                cancel_delayed_work_sync(
                        &per_cpu(pkg_temp_thermal_threshold_work, cpu));

        pr_debug("put_core_offline: cpu %d\n", cpu);
}

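/*
 * CPU hotplug notifier: treat CPU_ONLINE and CPU_DOWN_FAILED as "coming
 * online" and CPU_DOWN_PREPARE as "going offline".
 */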
static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long) hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                get_core_online(cpu);
                break;
        case CPU_DOWN_PREPARE:
                put_core_offline(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block pkg_temp_thermal_notifier __refdata = {
        .notifier_call = pkg_temp_thermal_cpu_callback,
};

static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
        { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS },
        {}
};
MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);

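/*
 * Module init: bail out unless the CPU advertises package thermal status
 * (PTS), hook the package thermal interrupt via
 * platform_thermal_package_notify / platform_thermal_package_rate_control,
 * set up every online CPU and register for hotplug events. On failure the
 * already initialized CPUs are torn down again.
 */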
static int __init pkg_temp_thermal_init(void)
{
        int i;

        if (!x86_match_cpu(pkg_temp_thermal_ids))
                return -ENODEV;

        spin_lock_init(&pkg_work_lock);
        platform_thermal_package_notify =
                        pkg_temp_thermal_platform_thermal_notify;
        platform_thermal_package_rate_control =
                        pkg_temp_thermal_platform_thermal_rate_control;

        get_online_cpus();
        for_each_online_cpu(i)
                if (get_core_online(i))
                        goto err_ret;
        register_hotcpu_notifier(&pkg_temp_thermal_notifier);
        put_online_cpus();

        pkg_temp_debugfs_init(); /* Don't care if fails */

        return 0;

err_ret:
        for_each_online_cpu(i)
                put_core_offline(i);
        put_online_cpus();
        kfree(pkg_work_scheduled);
        platform_thermal_package_notify = NULL;
        platform_thermal_package_rate_control = NULL;

        return -ENODEV;
}

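/*
 * Module exit: unhook the interrupt callbacks, restore each package's
 * original MSR_IA32_PACKAGE_THERM_INTERRUPT value, unregister the thermal
 * zones, flush any pending delayed work and remove the debugfs entries.
 */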
static void __exit pkg_temp_thermal_exit(void)
{
        struct phy_dev_entry *phdev, *n;
        int i;

        get_online_cpus();
        unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
        mutex_lock(&phy_dev_list_mutex);
        list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
                /* Restore old MSR value for package thermal interrupt */
                wrmsr_on_cpu(phdev->first_cpu,
                        MSR_IA32_PACKAGE_THERM_INTERRUPT,
                        phdev->start_pkg_therm_low,
                        phdev->start_pkg_therm_high);
                thermal_zone_device_unregister(phdev->tzone);
                list_del(&phdev->list);
                kfree(phdev);
        }
        mutex_unlock(&phy_dev_list_mutex);
        platform_thermal_package_notify = NULL;
        platform_thermal_package_rate_control = NULL;
        for_each_online_cpu(i)
                cancel_delayed_work_sync(
                        &per_cpu(pkg_temp_thermal_threshold_work, i));
        put_online_cpus();

        kfree(pkg_work_scheduled);

        debugfs_remove_recursive(debugfs);
}

module_init(pkg_temp_thermal_init)
module_exit(pkg_temp_thermal_exit)

MODULE_DESCRIPTION("X86 PKG TEMP Thermal Driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL v2");