/*
 * intel_ips: Intel Intelligent Power Sharing driver
 *
 * Author: Jesse Barnes <jbarnes@virtuousgeek.org>
 *
 * The IPS driver monitors package temperature and power on supported
 * platforms and dynamically shares the package power budget between the
 * CPU cores and the integrated graphics device (via the i915 driver),
 * raising or lowering each side's turbo limits as thermal and power
 * headroom allows.
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/dmi.h>
#include <drm/i915_drm.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include "intel_ips.h"

#include <linux/io-64-nonatomic-lo-hi.h>

#define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32

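/* CPU MSRs used for turbo and TDP control */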
#define PLATFORM_INFO 0xce
#define PLATFORM_TDP (1<<29)
#define PLATFORM_RATIO (1<<28)

#define IA32_MISC_ENABLE 0x1a0
#define IA32_MISC_TURBO_EN (1ULL<<38)

#define TURBO_POWER_CURRENT_LIMIT 0x1ac
#define TURBO_TDC_OVR_EN (1UL<<31)
#define TURBO_TDC_MASK (0x000000007fff0000UL)
#define TURBO_TDC_SHIFT (16)
#define TURBO_TDP_OVR_EN (1UL<<15)
#define TURBO_TDP_MASK (0x0000000000003fffUL)

#define IA32_PERF_CTL 0x199
#define IA32_PERF_TURBO_DIS (1ULL<<32)

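/* Thermal sensor PCI config registers and MMIO register offsets */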
#define THM_CFG_TBAR 0x10
#define THM_CFG_TBAR_HI 0x14

#define THM_TSIU 0x00
#define THM_TSE 0x01
#define TSE_EN 0xb8
#define THM_TSS 0x02
#define THM_TSTR 0x03
#define THM_TSTTP 0x04
#define THM_TSCO 0x08
#define THM_TSES 0x0c
#define THM_TSGPEN 0x0d
#define TSGPEN_HOT_LOHI (1<<1)
#define TSGPEN_CRIT_LOHI (1<<2)
#define THM_TSPC 0x0e
#define THM_PPEC 0x10
#define THM_CTA 0x12
#define THM_PTA 0x14
#define PTA_SLOPE_MASK (0xff00)
#define PTA_SLOPE_SHIFT 8
#define PTA_OFFSET_MASK (0x00ff)
#define THM_MGTA 0x16
#define MGTA_SLOPE_MASK (0xff00)
#define MGTA_SLOPE_SHIFT 8
#define MGTA_OFFSET_MASK (0x00ff)
#define THM_TRC 0x1a
#define TRC_CORE2_EN (1<<15)
#define TRC_THM_EN (1<<12)
#define TRC_C6_WAR (1<<8)
#define TRC_CORE1_EN (1<<7)
#define TRC_CORE_PWR (1<<6)
#define TRC_PCH_EN (1<<5)
#define TRC_MCH_EN (1<<4)
#define TRC_DIMM4 (1<<3)
#define TRC_DIMM3 (1<<2)
#define TRC_DIMM2 (1<<1)
#define TRC_DIMM1 (1<<0)
#define THM_TES 0x20
#define THM_TEN 0x21
#define TEN_UPDATE_EN 1
#define THM_PSC 0x24
#define PSC_NTG (1<<0)
#define PSC_NTPC (1<<1)
#define PSC_PP_DEF (0<<2)
#define PSP_PP_PC (1<<2)
#define PSP_PP_BAL (2<<2)
#define PSP_PP_GFX (3<<2)
#define PSP_PBRT (1<<4)
#define THM_CTV1 0x30
#define CTV_TEMP_ERROR (1<<15)
#define CTV_TEMP_MASK 0x3f
#define THM_CTV2 0x32
#define THM_CEC 0x34
#define THM_AE 0x3f
#define THM_HTS 0x50
#define HTS_PCPL_MASK (0x7fe00000)
#define HTS_PCPL_SHIFT 21
#define HTS_GPL_MASK (0x001ff000)
#define HTS_GPL_SHIFT 12
#define HTS_PP_MASK (0x00000c00)
#define HTS_PP_SHIFT 10
#define HTS_PP_DEF 0
#define HTS_PP_PROC 1
#define HTS_PP_BAL 2
#define HTS_PP_GFX 3
#define HTS_PCTD_DIS (1<<9)
#define HTS_GTD_DIS (1<<8)
#define HTS_PTL_MASK (0x000000fe)
#define HTS_PTL_SHIFT 1
#define HTS_NVV (1<<0)
#define THM_HTSHI 0x54
#define HTS2_PPL_MASK (0x03ff)
#define HTS2_PRST_MASK (0x3c00)
#define HTS2_PRST_SHIFT 10
#define HTS2_PRST_UNLOADED 0
#define HTS2_PRST_RUNNING 1
#define HTS2_PRST_TDISOP 2
#define HTS2_PRST_TDISHT 3
#define HTS2_PRST_TDISUSR 4
#define HTS2_PRST_TDISPLAT 5
#define HTS2_PRST_TDISPM 6
#define HTS2_PRST_TDISERR 7
#define THM_PTL 0x56
#define THM_MGTV 0x58
#define TV_MASK 0x000000000000ff00
#define TV_SHIFT 8
#define THM_PTV 0x60
#define PTV_MASK 0x00ff
#define THM_MMGPC 0x64
#define THM_MPPC 0x66
#define THM_MPCPC 0x68
#define THM_TSPIEN 0x82
#define TSPIEN_AUX_LOHI (1<<0)
#define TSPIEN_HOT_LOHI (1<<1)
#define TSPIEN_CRIT_LOHI (1<<2)
#define TSPIEN_AUX2_LOHI (1<<3)
#define THM_TSLOCK 0x83
#define THM_ATR 0x84
#define THM_TOF 0x87
#define THM_STS 0x98
#define STS_PCPL_MASK (0x7fe00000)
#define STS_PCPL_SHIFT 21
#define STS_GPL_MASK (0x001ff000)
#define STS_GPL_SHIFT 12
#define STS_PP_MASK (0x00000c00)
#define STS_PP_SHIFT 10
#define STS_PP_DEF 0
#define STS_PP_PROC 1
#define STS_PP_BAL 2
#define STS_PP_GFX 3
#define STS_PCTD_DIS (1<<9)
#define STS_GTD_DIS (1<<8)
#define STS_PTL_MASK (0x000000fe)
#define STS_PTL_SHIFT 1
#define STS_NVV (1<<0)
#define THM_SEC 0x9c
#define SEC_ACK (1<<0)
#define THM_TC3 0xa4
#define THM_TC1 0xa8
#define STS_PPL_MASK (0x0003ff00)
#define STS_PPL_SHIFT 16
#define THM_TC2 0xac
#define THM_DTV 0xb0
#define THM_ITV 0xd8
#define ITV_ME_SEQNO_MASK 0x00ff0000
#define ITV_ME_SEQNO_SHIFT (16)
#define ITV_MCH_TEMP_MASK 0x0000ff00
#define ITV_MCH_TEMP_SHIFT (8)
#define ITV_PCH_TEMP_MASK 0x000000ff

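/* MMIO accessors for the thermal controller; expect a local "ips" in scope */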
#define thm_readb(off) readb(ips->regmap + (off))
#define thm_readw(off) readw(ips->regmap + (off))
#define thm_readl(off) readl(ips->regmap + (off))
#define thm_readq(off) readq(ips->regmap + (off))

#define thm_writeb(off, val) writeb((val), ips->regmap + (off))
#define thm_writew(off, val) writew((val), ips->regmap + (off))
#define thm_writel(off, val) writel((val), ips->regmap + (off))

static const int IPS_ADJUST_PERIOD = 5000; /* ms */
static bool late_i915_load = false;

/* Sampling period and window used to compute the initial averages */
static const int IPS_SAMPLE_PERIOD = 200; /* ms */
static const int IPS_SAMPLE_WINDOW = 5000; /* ms, 5s sample window */
#define IPS_SAMPLE_COUNT (IPS_SAMPLE_WINDOW / IPS_SAMPLE_PERIOD)

struct ips_mcp_limits {
	int mcp_power_limit; /* multi-chip package limit, in mW */
	int core_power_limit; /* in mW */
	int mch_power_limit; /* in mW */
	int core_temp_limit; /* in degrees C */
	int mch_temp_limit; /* in degrees C */
};

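/* Limits for standard, low and ultra-low voltage SKUs */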
static struct ips_mcp_limits ips_sv_limits = {
	.mcp_power_limit = 35000,
	.core_power_limit = 29000,
	.mch_power_limit = 20000,
	.core_temp_limit = 95,
	.mch_temp_limit = 90
};

static struct ips_mcp_limits ips_lv_limits = {
	.mcp_power_limit = 25000,
	.core_power_limit = 21000,
	.mch_power_limit = 13000,
	.core_temp_limit = 95,
	.mch_temp_limit = 90
};

static struct ips_mcp_limits ips_ulv_limits = {
	.mcp_power_limit = 18000,
	.core_power_limit = 14000,
	.mch_power_limit = 11000,
	.core_temp_limit = 95,
	.mch_temp_limit = 90
};

struct ips_driver {
	struct device *dev;
	void __iomem *regmap;
	int irq;

	struct task_struct *monitor;
	struct task_struct *adjust;
	struct dentry *debug_root;
	struct timer_list timer;

	/* Running averages, in 0.01 degrees C (temps) and mW (power) */
	u16 ctv1_avg_temp; /* CPU core 1 */
	u16 ctv2_avg_temp; /* CPU core 2 */
	u16 mch_avg_temp; /* graphics/MCH */
	u16 mcp_avg_temp; /* multi-chip package */
	u32 cpu_avg_power;
	u32 mch_avg_power;

	/* Calibration values saved at probe time */
	u16 cta_val;
	u16 pta_val;
	u16 mgta_val;

	/* Limits and turbo state, protected by turbo_status_lock */
	spinlock_t turbo_status_lock;
	u16 mcp_temp_limit;
	u16 mcp_power_limit;
	u16 core_power_limit;
	u16 mch_power_limit;
	bool cpu_turbo_enabled;
	bool __cpu_turbo_on;
	bool gpu_turbo_enabled;
	bool __gpu_turbo_on;
	bool gpu_preferred;
	bool poll_turbo_status;
	bool second_cpu;
	bool turbo_toggle_allowed;
	struct ips_mcp_limits *limits;

	/* i915 hooks, present only when the i915 driver is loaded */
	unsigned long (*read_mch_val)(void);
	bool (*gpu_raise)(void);
	bool (*gpu_lower)(void);
	bool (*gpu_busy)(void);
	bool (*gpu_turbo_disable)(void);

	/* Original turbo limits, restored on driver removal */
	u64 orig_turbo_limit;
	u64 orig_turbo_ratios;
};

static bool
ips_gpu_turbo_enabled(struct ips_driver *ips);

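/**
 * ips_cpu_busy - is CPU busy?
 * @ips: IPS driver struct
 *
 * Check the 1 minute load average; if it's above 1, assume the CPU could
 * use more power and return true.
 */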
static bool ips_cpu_busy(struct ips_driver *ips)
{
	if ((avenrun[0] >> FSHIFT) > 1)
		return true;

	return false;
}
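/**
 * ips_cpu_raise - raise CPU power clamp
 * @ips: IPS driver struct
 *
 * Raise the CPU TDP clamp by one unit (the MSR field is in 1/8W units, so
 * +8 is +1W), unless that would exceed the per-SKU core power limit or CPU
 * turbo is disabled.
 */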
static void ips_cpu_raise(struct ips_driver *ips)
{
	u64 turbo_override;
	u16 cur_tdp_limit, new_tdp_limit;

	if (!ips->cpu_turbo_enabled)
		return;

	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	cur_tdp_limit = turbo_override & TURBO_TDP_MASK;
	new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */

	/* Clamp to SKU TDP limit */
	if (((new_tdp_limit * 10) / 8) > ips->core_power_limit)
		new_tdp_limit = cur_tdp_limit;

	thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);

	turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	turbo_override &= ~TURBO_TDP_MASK;
	turbo_override |= new_tdp_limit;

	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
}
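/**
 * ips_cpu_lower - lower CPU power clamp
 * @ips: IPS driver struct
 *
 * Lower the CPU TDP clamp by 1W, but never below the original limit saved
 * at driver load time.
 */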
static void ips_cpu_lower(struct ips_driver *ips)
{
	u64 turbo_override;
	u16 cur_limit, new_limit;

	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	cur_limit = turbo_override & TURBO_TDP_MASK;
	new_limit = cur_limit - 8; /* 1W decrease */

	/* Clamp to the original, BIOS-provided limit */
	if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
		new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;

	thm_writew(THM_MPCPC, (new_limit * 10) / 8);

	turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	turbo_override &= ~TURBO_TDP_MASK;
	turbo_override |= new_limit;

	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
}
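/**
 * do_enable_cpu_turbo - internal turbo enable function
 * @data: unused
 *
 * Internal function for actually updating the turbo disable bit in
 * IA32_PERF_CTL; called on each CPU via on_each_cpu().
 */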
static void do_enable_cpu_turbo(void *data)
{
	u64 perf_ctl;

	rdmsrl(IA32_PERF_CTL, perf_ctl);
	if (perf_ctl & IA32_PERF_TURBO_DIS) {
		perf_ctl &= ~IA32_PERF_TURBO_DIS;
		wrmsrl(IA32_PERF_CTL, perf_ctl);
	}
}
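/**
 * ips_enable_cpu_turbo - enable turbo mode on all CPUs
 * @ips: IPS driver struct
 *
 * Enable turbo mode by clearing the turbo disable bit on all logical CPUs,
 * if the platform allows us to toggle it.
 */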
static void ips_enable_cpu_turbo(struct ips_driver *ips)
{
	/* Already on, no need to mess with MSRs */
	if (ips->__cpu_turbo_on)
		return;

	if (ips->turbo_toggle_allowed)
		on_each_cpu(do_enable_cpu_turbo, ips, 1);

	ips->__cpu_turbo_on = true;
}
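/**
 * do_disable_cpu_turbo - internal turbo disable function
 * @data: unused
 *
 * Internal function for actually setting the turbo disable bit in
 * IA32_PERF_CTL; called on each CPU via on_each_cpu().
 */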
static void do_disable_cpu_turbo(void *data)
{
	u64 perf_ctl;

	rdmsrl(IA32_PERF_CTL, perf_ctl);
	if (!(perf_ctl & IA32_PERF_TURBO_DIS)) {
		perf_ctl |= IA32_PERF_TURBO_DIS;
		wrmsrl(IA32_PERF_CTL, perf_ctl);
	}
}
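/**
 * ips_disable_cpu_turbo - disable turbo mode on all CPUs
 * @ips: IPS driver struct
 *
 * Disable turbo mode by setting the turbo disable bit on all logical CPUs,
 * if the platform allows us to toggle it.
 */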
static void ips_disable_cpu_turbo(struct ips_driver *ips)
{
	/* Already off, leave it alone */
	if (!ips->__cpu_turbo_on)
		return;

	if (ips->turbo_toggle_allowed)
		on_each_cpu(do_disable_cpu_turbo, ips, 1);

	ips->__cpu_turbo_on = false;
}
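/**
 * ips_gpu_busy - is GPU busy?
 * @ips: IPS driver struct
 *
 * Check GPU load via the i915 busy hook, to see whether we should increase
 * its thermal budget.  Returns false if graphics turbo is unavailable.
 */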
static bool ips_gpu_busy(struct ips_driver *ips)
{
	if (!ips_gpu_turbo_enabled(ips))
		return false;

	return ips->gpu_busy();
}
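/**
 * ips_gpu_raise - raise GPU power clamp
 * @ips: IPS driver struct
 *
 * Ask the i915 driver for more GPU power; if the request fails, stop
 * trying to manage graphics turbo.
 */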
static void ips_gpu_raise(struct ips_driver *ips)
{
	if (!ips_gpu_turbo_enabled(ips))
		return;

	if (!ips->gpu_raise())
		ips->gpu_turbo_enabled = false;
}
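/**
 * ips_gpu_lower - lower GPU power clamp
 * @ips: IPS driver struct
 *
 * Ask the i915 driver to reduce GPU power; if the request fails, stop
 * trying to manage graphics turbo.
 */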
static void ips_gpu_lower(struct ips_driver *ips)
{
	if (!ips_gpu_turbo_enabled(ips))
		return;

	if (!ips->gpu_lower())
		ips->gpu_turbo_enabled = false;
}
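/**
 * ips_enable_gpu_turbo - note that graphics turbo is allowed
 * @ips: IPS driver struct
 *
 * Only tracks internal state; the GPU clamps themselves are adjusted
 * through the raise/lower hooks.
 */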
static void ips_enable_gpu_turbo(struct ips_driver *ips)
{
	if (ips->__gpu_turbo_on)
		return;
	ips->__gpu_turbo_on = true;
}
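/**
 * ips_disable_gpu_turbo - disable graphics turbo
 * @ips: IPS driver struct
 *
 * Ask the i915 driver to disable graphics turbo and track the result.
 */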
static void ips_disable_gpu_turbo(struct ips_driver *ips)
{
	if (!ips->__gpu_turbo_on)
		return;

	if (!ips->gpu_turbo_disable())
		dev_err(ips->dev, "failed to disable graphics turbo\n");
	else
		ips->__gpu_turbo_on = false;
}
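/**
 * mcp_exceeded - check whether the MCP is outside its thermal & power limits
 * @ips: IPS driver struct
 *
 * Check whether the multi-chip package is over its thermal or power budget.
 * Temperature averages are kept in 0.01 degrees C, so the limit (in whole
 * degrees) is scaled by 100 before comparing.
 */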
static bool mcp_exceeded(struct ips_driver *ips)
{
	unsigned long flags;
	bool ret = false;
	u32 temp_limit;
	u32 avg_power;

	spin_lock_irqsave(&ips->turbo_status_lock, flags);

	temp_limit = ips->mcp_temp_limit * 100;
	if (ips->mcp_avg_temp > temp_limit)
		ret = true;

	avg_power = ips->cpu_avg_power + ips->mch_avg_power;
	if (avg_power > ips->mcp_power_limit)
		ret = true;

	spin_unlock_irqrestore(&ips->turbo_status_lock, flags);

	return ret;
}
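/**
 * cpu_exceeded - check whether a CPU core is outside its limits
 * @ips: IPS driver struct
 * @cpu: core (0 or 1) to check
 *
 * Check a given CPU's average temp and power against the per-SKU limits.
 */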
static bool cpu_exceeded(struct ips_driver *ips, int cpu)
{
	unsigned long flags;
	int avg;
	bool ret = false;

	spin_lock_irqsave(&ips->turbo_status_lock, flags);
	avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp;
	if (avg > (ips->limits->core_temp_limit * 100))
		ret = true;
	if (ips->cpu_avg_power > ips->core_power_limit * 100)
		ret = true;
	spin_unlock_irqrestore(&ips->turbo_status_lock, flags);

	if (ret)
		dev_info(ips->dev, "CPU power or thermal limit exceeded\n");

	return ret;
}
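/**
 * mch_exceeded - check whether the GPU/MCH is outside its limits
 * @ips: IPS driver struct
 *
 * Check the MCH temp & power against their maximums.
 */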
static bool mch_exceeded(struct ips_driver *ips)
{
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&ips->turbo_status_lock, flags);
	if (ips->mch_avg_temp > (ips->limits->mch_temp_limit * 100))
		ret = true;
	if (ips->mch_avg_power > ips->mch_power_limit)
		ret = true;
	spin_unlock_irqrestore(&ips->turbo_status_lock, flags);

	return ret;
}
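/**
 * verify_limits - sanity-check limits read back from hardware
 * @ips: IPS driver struct
 *
 * Fall back to the per-SKU defaults if the MCP power limit is out of
 * range, and clamp the MCP temp limit to a sane value.
 */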
static void verify_limits(struct ips_driver *ips)
{
	if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
	    ips->mcp_power_limit > 35000)
		ips->mcp_power_limit = ips->limits->mcp_power_limit;

	if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
	    ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
	    ips->mcp_temp_limit > 150)
		ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
					  ips->limits->mch_temp_limit);
}
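/**
 * update_turbo_limits - get various limits & settings from regs
 * @ips: IPS driver struct
 *
 * Update the IPS power & temp limits, along with turbo enable flags, based
 * on the latest register contents.  Used at init time and, when the BIOS
 * requires polling, from the adjust thread (which holds turbo_status_lock
 * around the call).
 */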
static void update_turbo_limits(struct ips_driver *ips)
{
	u32 hts = thm_readl(THM_HTS);

	ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS);
	/*
	 * Disable CPU turbo for now: the power figures reported with it
	 * enabled are unreliable.
	 */
	ips->cpu_turbo_enabled = false;

	if (ips->gpu_busy)
		ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);

	ips->core_power_limit = thm_readw(THM_MPCPC);
	ips->mch_power_limit = thm_readw(THM_MMGPC);
	ips->mcp_temp_limit = thm_readw(THM_PTL);
	ips->mcp_power_limit = thm_readw(THM_MPPC);

	verify_limits(ips);
}
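/**
 * ips_adjust - adjust power clamps based on usage and limits
 * @data: IPS driver structure
 *
 * The adjust thread periodically re-reads the turbo limits (if the BIOS
 * requires polling), then applies simple decision logic:
 *   - if the MCP as a whole is over its limits, lower both the CPU and
 *     GPU clamps;
 *   - otherwise, raise whichever side is busy and within its own limits,
 *     and lower whichever side is not.
 */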
static int ips_adjust(void *data)
{
	struct ips_driver *ips = data;
	unsigned long flags;

	dev_dbg(ips->dev, "starting ips-adjust thread\n");

	/* Adjust CPU and GPU clamps every 5s if needed */
	do {
		bool cpu_busy = ips_cpu_busy(ips);
		bool gpu_busy = ips_gpu_busy(ips);

		spin_lock_irqsave(&ips->turbo_status_lock, flags);
		if (ips->poll_turbo_status)
			update_turbo_limits(ips);
		spin_unlock_irqrestore(&ips->turbo_status_lock, flags);

		/* Update turbo status as necessary */
		if (ips->cpu_turbo_enabled)
			ips_enable_cpu_turbo(ips);
		else
			ips_disable_cpu_turbo(ips);

		if (ips->gpu_turbo_enabled)
			ips_enable_gpu_turbo(ips);
		else
			ips_disable_gpu_turbo(ips);

		/* We're outside our comfort zone, crank them down */
		if (mcp_exceeded(ips)) {
			ips_cpu_lower(ips);
			ips_gpu_lower(ips);
			goto sleep;
		}

		if (!cpu_exceeded(ips, 0) && cpu_busy)
			ips_cpu_raise(ips);
		else
			ips_cpu_lower(ips);

		if (!mch_exceeded(ips) && gpu_busy)
			ips_gpu_raise(ips);
		else
			ips_gpu_lower(ips);

sleep:
		schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
	} while (!kthread_should_stop());

	dev_dbg(ips->dev, "ips-adjust thread stopped\n");

	return 0;
}
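/**
 * calc_avg_temp - average temperature over a sample window
 * @ips: IPS driver struct
 * @array: array of IPS_SAMPLE_COUNT raw temperature samples
 *
 * Returns the average in 0.01 degrees C (samples are scaled by 100 to
 * preserve precision through the integer division).
 */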
static u16 calc_avg_temp(struct ips_driver *ips, u16 *array)
{
	u64 total = 0;
	int i;
	u16 avg;

	for (i = 0; i < IPS_SAMPLE_COUNT; i++)
		total += (u64)(array[i] * 100);

	do_div(total, IPS_SAMPLE_COUNT);

	avg = (u16)total;

	return avg;
}

static u16 read_mgtv(struct ips_driver *ips)
{
	u16 ret;
	u64 slope, offset;
	u64 val;

	val = thm_readq(THM_MGTV);
	val = (val & TV_MASK) >> TV_SHIFT;

	/* Convert the raw reading using the slope/offset calibration pair */
	slope = offset = thm_readw(THM_MGTA);
	slope = (slope & MGTA_SLOPE_MASK) >> MGTA_SLOPE_SHIFT;
	offset = offset & MGTA_OFFSET_MASK;

	ret = ((val * slope + 0x40) >> 7) + offset;

	return ret;
}

static u16 read_ptv(struct ips_driver *ips)
{
	u16 val;

	val = thm_readw(THM_PTV) & PTV_MASK;

	return val;
}

static u16 read_ctv(struct ips_driver *ips, int cpu)
{
	int reg = cpu ? THM_CTV2 : THM_CTV1;
	u16 val;

	val = thm_readw(reg);
	if (!(val & CTV_TEMP_ERROR))
		val = val >> 6; /* discard fractional component */
	else
		val = 0;

	return val;
}

static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period)
{
	u32 val;
	u32 ret;

	/*
	 * CEC is in joules/65535.  Take the difference over time to
	 * get watts.
	 */
	val = thm_readl(THM_CEC);

	/* period is in ms and we want mW */
	ret = (((val - *last) * 1000) / period);
	ret = (ret * 1000) / 65535;
	*last = val;

	return ret;
}

static const u16 temp_decay_factor = 2;
static u16 update_average_temp(u16 avg, u16 val)
{
	u16 ret;

	/* Multiply by 100 for extra precision */
	ret = (val * 100 / temp_decay_factor) +
		(((temp_decay_factor - 1) * avg) / temp_decay_factor);
	return ret;
}

static const u16 power_decay_factor = 2;
static u16 update_average_power(u32 avg, u32 val)
{
	u32 ret;

	ret = (val / power_decay_factor) +
		(((power_decay_factor - 1) * avg) / power_decay_factor);

	return ret;
}

static u32 calc_avg_power(struct ips_driver *ips, u32 *array)
{
	u64 total = 0;
	u32 avg;
	int i;

	for (i = 0; i < IPS_SAMPLE_COUNT; i++)
		total += array[i];

	do_div(total, IPS_SAMPLE_COUNT);
	avg = (u32)total;

	return avg;
}

static void monitor_timeout(struct timer_list *t)
{
	struct ips_driver *ips = from_timer(ips, t, timer);
	wake_up_process(ips->monitor);
}
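/**
 * ips_monitor - temp/power monitoring thread
 * @data: IPS driver structure
 *
 * This is the main sampling loop of the IPS driver.  It collects an
 * initial 5s window of temperature and power samples to seed the averages,
 * wakes the adjust thread, then maintains decaying running averages every
 * sample period while watching the ME sequence number for signs of a
 * firmware hang.
 */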
static int ips_monitor(void *data)
{
	struct ips_driver *ips = data;
	unsigned long seqno_timestamp, expire, last_msecs, last_sample_period;
	int i;
	u32 *cpu_samples, *mchp_samples, old_cpu_power;
	u16 *mcp_samples, *ctv1_samples, *ctv2_samples, *mch_samples;
	u8 cur_seqno, last_seqno;

	mcp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
	ctv1_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
	ctv2_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
	mch_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
	cpu_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL);
	mchp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL);
	if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples ||
	    !cpu_samples || !mchp_samples) {
		dev_err(ips->dev,
			"failed to allocate sample array, ips disabled\n");
		kfree(mcp_samples);
		kfree(ctv1_samples);
		kfree(ctv2_samples);
		kfree(mch_samples);
		kfree(cpu_samples);
		kfree(mchp_samples);
		return -ENOMEM;
	}

	last_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
		ITV_ME_SEQNO_SHIFT;
	seqno_timestamp = get_jiffies_64();

	old_cpu_power = thm_readl(THM_CEC);
	schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));

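	/* Collect an initial average */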
	for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
		u32 mchp, cpu_power;
		u16 val;

		mcp_samples[i] = read_ptv(ips);

		val = read_ctv(ips, 0);
		ctv1_samples[i] = val;

		val = read_ctv(ips, 1);
		ctv2_samples[i] = val;

		val = read_mgtv(ips);
		mch_samples[i] = val;

		cpu_power = get_cpu_power(ips, &old_cpu_power,
					  IPS_SAMPLE_PERIOD);
		cpu_samples[i] = cpu_power;

		if (ips->read_mch_val) {
			mchp = ips->read_mch_val();
			mchp_samples[i] = mchp;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
		if (kthread_should_stop())
			break;
	}
1019
1020 ips->mcp_avg_temp = calc_avg_temp(ips, mcp_samples);
1021 ips->ctv1_avg_temp = calc_avg_temp(ips, ctv1_samples);
1022 ips->ctv2_avg_temp = calc_avg_temp(ips, ctv2_samples);
1023 ips->mch_avg_temp = calc_avg_temp(ips, mch_samples);
1024 ips->cpu_avg_power = calc_avg_power(ips, cpu_samples);
1025 ips->mch_avg_power = calc_avg_power(ips, mchp_samples);
1026 kfree(mcp_samples);
1027 kfree(ctv1_samples);
1028 kfree(ctv2_samples);
1029 kfree(mch_samples);
1030 kfree(cpu_samples);
1031 kfree(mchp_samples);
1032
1033
1034 wake_up_process(ips->adjust);
1035

	/*
	 * Ok, now we have an initial avg.  From here on out, we track the
	 * running avg using a decaying average calculation.  This allows
	 * us to reduce the sample frequency if the CPU and GPU are idle.
	 */
	old_cpu_power = thm_readl(THM_CEC);
	schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
	last_sample_period = IPS_SAMPLE_PERIOD;

	timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
	do {
		u32 cpu_val, mch_val;
		u16 val;

		/* MCP itself */
		val = read_ptv(ips);
		ips->mcp_avg_temp = update_average_temp(ips->mcp_avg_temp, val);

		/* Processor 0 */
		val = read_ctv(ips, 0);
		ips->ctv1_avg_temp =
			update_average_temp(ips->ctv1_avg_temp, val);

		cpu_val = get_cpu_power(ips, &old_cpu_power,
					last_sample_period);
		ips->cpu_avg_power =
			update_average_power(ips->cpu_avg_power, cpu_val);

		if (ips->second_cpu) {
			/* Processor 1 */
			val = read_ctv(ips, 1);
			ips->ctv2_avg_temp =
				update_average_temp(ips->ctv2_avg_temp, val);
		}

		/* MCH */
		val = read_mgtv(ips);
		ips->mch_avg_temp = update_average_temp(ips->mch_avg_temp, val);

		if (ips->read_mch_val) {
			mch_val = ips->read_mch_val();
			ips->mch_avg_power =
				update_average_power(ips->mch_avg_power,
						     mch_val);
		}

		/*
		 * Make sure the ME is still updating the thermal regs: if
		 * its sequence number hasn't changed in over a second, it
		 * is probably hung.
		 */
		cur_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
			ITV_ME_SEQNO_SHIFT;
		if (cur_seqno == last_seqno &&
		    time_after(jiffies, seqno_timestamp + HZ)) {
			dev_warn(ips->dev,
				 "ME failed to update for more than 1s, likely hung\n");
		} else {
			seqno_timestamp = get_jiffies_64();
			last_seqno = cur_seqno;
		}

		last_msecs = jiffies_to_msecs(jiffies);
		expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD);

		__set_current_state(TASK_INTERRUPTIBLE);
		mod_timer(&ips->timer, expire);
		schedule();

		/* Calculate actual sample period for power averaging */
		last_sample_period = jiffies_to_msecs(jiffies) - last_msecs;
		if (!last_sample_period)
			last_sample_period = 1;
	} while (!kthread_should_stop());

	del_timer_sync(&ips->timer);

	dev_dbg(ips->dev, "ips-monitor thread stopped\n");

	return 0;
}

#if 0
#define THM_DUMPW(reg) \
	{ \
	u16 val = thm_readw(reg); \
	dev_dbg(ips->dev, #reg ": 0x%04x\n", val); \
	}
#define THM_DUMPL(reg) \
	{ \
	u32 val = thm_readl(reg); \
	dev_dbg(ips->dev, #reg ": 0x%08x\n", val); \
	}
#define THM_DUMPQ(reg) \
	{ \
	u64 val = thm_readq(reg); \
	dev_dbg(ips->dev, #reg ": 0x%016llx\n", (unsigned long long)val); \
	}

static void dump_thermal_info(struct ips_driver *ips)
{
	u16 ptl;

	ptl = thm_readw(THM_PTL);
	dev_dbg(ips->dev, "Processor temp limit: %d\n", ptl);

	THM_DUMPW(THM_CTA);
	THM_DUMPW(THM_TRC);
	THM_DUMPW(THM_CTV1);
	THM_DUMPL(THM_STS);
	THM_DUMPW(THM_PTV);
	THM_DUMPQ(THM_MGTV);
}
#endif
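/**
 * ips_irq_handler - handle temperature triggers and other IRQs
 * @irq: irq number
 * @arg: IPS driver struct
 *
 * Handle temperature limit trigger events, generated either by the thermal
 * sensor (TSES) or the ME firmware (TES).  On a new-value event from the
 * ME, pick up the updated limits and acknowledge them.
 */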
static irqreturn_t ips_irq_handler(int irq, void *arg)
{
	struct ips_driver *ips = arg;
	u8 tses = thm_readb(THM_TSES);
	u8 tes = thm_readb(THM_TES);

	if (!tses && !tes)
		return IRQ_NONE;

	dev_info(ips->dev, "TSES: 0x%02x\n", tses);
	dev_info(ips->dev, "TES: 0x%02x\n", tes);

	/* Handle events */
	if (tes & 1) {
		u32 sts, tc1;

		sts = thm_readl(THM_STS);
		tc1 = thm_readl(THM_TC1);

		if (sts & STS_NVV) {
			spin_lock(&ips->turbo_status_lock);
			ips->core_power_limit = (sts & STS_PCPL_MASK) >>
				STS_PCPL_SHIFT;
			ips->mch_power_limit = (sts & STS_GPL_MASK) >>
				STS_GPL_SHIFT;
			ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS);
			/*
			 * Disable CPU turbo for now: the power figures
			 * reported with it enabled are unreliable.
			 */
			ips->cpu_turbo_enabled = false;
			if (ips->gpu_busy)
				ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
			ips->mcp_temp_limit = (sts & STS_PTL_MASK) >>
				STS_PTL_SHIFT;
			ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >>
				STS_PPL_SHIFT;
			verify_limits(ips);
			spin_unlock(&ips->turbo_status_lock);

			thm_writeb(THM_SEC, SEC_ACK);
		}
		thm_writeb(THM_TES, tes);
	}

	/* Thermal trip events */
	if (tses) {
		dev_warn(ips->dev, "thermal trip occurred, tses: 0x%02x\n",
			 tses);
		thm_writeb(THM_TSES, tses);
	}

	return IRQ_HANDLED;
}

#ifndef CONFIG_DEBUG_FS
static void ips_debugfs_init(struct ips_driver *ips) { return; }
static void ips_debugfs_cleanup(struct ips_driver *ips) { return; }
#else

/* Expose current state and limits in debugfs if possible */

struct ips_debugfs_node {
	struct ips_driver *ips;
	char *name;
	int (*show)(struct seq_file *m, void *data);
};

static int show_cpu_temp(struct seq_file *m, void *data)
{
	struct ips_driver *ips = m->private;

	seq_printf(m, "%d.%02d\n", ips->ctv1_avg_temp / 100,
		   ips->ctv1_avg_temp % 100);

	return 0;
}

static int show_cpu_power(struct seq_file *m, void *data)
{
	struct ips_driver *ips = m->private;

	seq_printf(m, "%dmW\n", ips->cpu_avg_power);

	return 0;
}

static int show_cpu_clamp(struct seq_file *m, void *data)
{
	u64 turbo_override;
	int tdp, tdc;

	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);

	tdp = (int)(turbo_override & TURBO_TDP_MASK);
	tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT);

	/* MSR fields are in 1/8 W and 1/8 A; convert to tenths for display */
	tdp = tdp * 10 / 8;
	tdc = tdc * 10 / 8;

	seq_printf(m, "%d.%dW %d.%dA\n", tdp / 10, tdp % 10,
		   tdc / 10, tdc % 10);

	return 0;
}

static int show_mch_temp(struct seq_file *m, void *data)
{
	struct ips_driver *ips = m->private;

	seq_printf(m, "%d.%02d\n", ips->mch_avg_temp / 100,
		   ips->mch_avg_temp % 100);

	return 0;
}

static int show_mch_power(struct seq_file *m, void *data)
{
	struct ips_driver *ips = m->private;

	seq_printf(m, "%dmW\n", ips->mch_avg_power);

	return 0;
}

static struct ips_debugfs_node ips_debug_files[] = {
	{ NULL, "cpu_temp", show_cpu_temp },
	{ NULL, "cpu_power", show_cpu_power },
	{ NULL, "cpu_clamp", show_cpu_clamp },
	{ NULL, "mch_temp", show_mch_temp },
	{ NULL, "mch_power", show_mch_power },
};

static int ips_debugfs_open(struct inode *inode, struct file *file)
{
	struct ips_debugfs_node *node = inode->i_private;

	return single_open(file, node->show, node->ips);
}

static const struct file_operations ips_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = ips_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void ips_debugfs_cleanup(struct ips_driver *ips)
{
	if (ips->debug_root)
		debugfs_remove_recursive(ips->debug_root);
}

static void ips_debugfs_init(struct ips_driver *ips)
{
	int i;

	ips->debug_root = debugfs_create_dir("ips", NULL);
	if (!ips->debug_root) {
		dev_err(ips->dev, "failed to create debugfs entries: %ld\n",
			PTR_ERR(ips->debug_root));
		return;
	}

	for (i = 0; i < ARRAY_SIZE(ips_debug_files); i++) {
		struct dentry *ent;
		struct ips_debugfs_node *node = &ips_debug_files[i];

		node->ips = ips;
		ent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
					  ips->debug_root, node,
					  &ips_debugfs_ops);
		if (!ent) {
			dev_err(ips->dev, "failed to create debug file: %ld\n",
				PTR_ERR(ent));
			goto err_cleanup;
		}
	}

	return;

err_cleanup:
	ips_debugfs_cleanup(ips);
}
#endif
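/**
 * ips_detect_cpu - detect whether this CPU supports IPS
 * @ips: IPS driver struct
 *
 * Do a basic check to see whether IPS is supported on this CPU (family 6,
 * model 37, i.e. Arrandale), figure out whether turbo can be toggled, and
 * pick the per-SKU limits based on the marketing name.
 */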
static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
{
	u64 turbo_power, misc_en;
	struct ips_mcp_limits *limits = NULL;
	u16 tdp;

	if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
		dev_info(ips->dev, "Non-IPS CPU detected.\n");
		return NULL;
	}

	rdmsrl(IA32_MISC_ENABLE, misc_en);
	/* We can only toggle turbo in the MSR if the BIOS enabled turbo */
	if (misc_en & IA32_MISC_TURBO_EN)
		ips->turbo_toggle_allowed = true;
	else
		ips->turbo_toggle_allowed = false;

	if (strstr(boot_cpu_data.x86_model_id, "CPU M"))
		limits = &ips_sv_limits;
	else if (strstr(boot_cpu_data.x86_model_id, "CPU L"))
		limits = &ips_lv_limits;
	else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
		limits = &ips_ulv_limits;
	else {
		dev_info(ips->dev, "No CPUID match found.\n");
		return NULL;
	}

	/* Sanity check the TDP against the SKU limit (MSR units are 1/8W) */
	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
	tdp = turbo_power & TURBO_TDP_MASK;

	if (limits->core_power_limit != (tdp / 8) * 1000) {
		dev_info(ips->dev,
			 "CPU TDP doesn't match expected value (found %d, expected %d)\n",
			 tdp / 8, limits->core_power_limit / 1000);
		limits->core_power_limit = (tdp / 8) * 1000;
	}

	return limits;
}
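/**
 * ips_get_i915_syms - try to grab the i915 driver's GPU hooks
 * @ips: IPS driver struct
 *
 * Grab the function pointers the i915 driver exports for busy tracking and
 * turbo control.  Fails (returning false) if i915 isn't loaded yet, in
 * which case graphics turbo stays off until ips_link_to_i915_driver() is
 * called.
 */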
static bool ips_get_i915_syms(struct ips_driver *ips)
{
	ips->read_mch_val = symbol_get(i915_read_mch_val);
	if (!ips->read_mch_val)
		goto out_err;
	ips->gpu_raise = symbol_get(i915_gpu_raise);
	if (!ips->gpu_raise)
		goto out_put_mch;
	ips->gpu_lower = symbol_get(i915_gpu_lower);
	if (!ips->gpu_lower)
		goto out_put_raise;
	ips->gpu_busy = symbol_get(i915_gpu_busy);
	if (!ips->gpu_busy)
		goto out_put_lower;
	ips->gpu_turbo_disable = symbol_get(i915_gpu_turbo_disable);
	if (!ips->gpu_turbo_disable)
		goto out_put_busy;

	return true;

out_put_busy:
	symbol_put(i915_gpu_busy);
out_put_lower:
	symbol_put(i915_gpu_lower);
out_put_raise:
	symbol_put(i915_gpu_raise);
out_put_mch:
	symbol_put(i915_read_mch_val);
out_err:
	return false;
}

static bool
ips_gpu_turbo_enabled(struct ips_driver *ips)
{
	if (!ips->gpu_busy && late_i915_load) {
		if (ips_get_i915_syms(ips)) {
			dev_info(ips->dev,
				 "i915 driver attached, reenabling gpu turbo\n");
			ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
		}
	}

	return ips->gpu_turbo_enabled;
}

void
ips_link_to_i915_driver(void)
{
	/*
	 * We can't cleanly get at an ips_driver struct from this caller
	 * (the i915 driver), so just set a flag saying i915 has loaded;
	 * the turbo-enable path will pick up the symbols later.
	 */
	late_i915_load = true;
}
EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);

static const struct pci_device_id ips_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ips_id_table);

static int ips_blacklist_callback(const struct dmi_system_id *id)
{
	pr_info("Blacklisted intel_ips for %s\n", id->ident);
	return 1;
}

static const struct dmi_system_id ips_blacklist[] = {
	{
		.callback = ips_blacklist_callback,
		.ident = "HP ProBook",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
		},
	},
	{ }
};

static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	u64 platform_info;
	struct ips_driver *ips;
	u32 hts;
	int ret = 0;
	u16 htshi, trc, trc_required_mask;
	u8 tse;

	if (dmi_check_system(ips_blacklist))
		return -ENODEV;

	ips = devm_kzalloc(&dev->dev, sizeof(*ips), GFP_KERNEL);
	if (!ips)
		return -ENOMEM;

	spin_lock_init(&ips->turbo_status_lock);
	ips->dev = &dev->dev;

	ips->limits = ips_detect_cpu(ips);
	if (!ips->limits) {
		dev_info(&dev->dev, "IPS not supported on this CPU\n");
		return -ENXIO;
	}

	ret = pcim_enable_device(dev);
	if (ret) {
		dev_err(&dev->dev, "can't enable PCI device, aborting\n");
		return ret;
	}

	ret = pcim_iomap_regions(dev, 1 << 0, pci_name(dev));
	if (ret) {
		dev_err(&dev->dev, "failed to map thermal regs, aborting\n");
		return ret;
	}
	ips->regmap = pcim_iomap_table(dev)[0];

	pci_set_drvdata(dev, ips);

	tse = thm_readb(THM_TSE);
	if (tse != TSE_EN) {
		dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse);
		return -ENXIO;
	}

	trc = thm_readw(THM_TRC);
	trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN;
	if ((trc & trc_required_mask) != trc_required_mask) {
		dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n");
		return -ENXIO;
	}

	if (trc & TRC_CORE2_EN)
		ips->second_cpu = true;

	update_turbo_limits(ips);
	dev_dbg(&dev->dev, "max cpu power clamp: %dW\n",
		ips->mcp_power_limit / 10);
	dev_dbg(&dev->dev, "max core power clamp: %dW\n",
		ips->core_power_limit / 10);
	/* BIOS may update limits at runtime */
	if (thm_readl(THM_PSC) & PSP_PBRT)
		ips->poll_turbo_status = true;

	if (!ips_get_i915_syms(ips)) {
		dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n");
		ips->gpu_turbo_enabled = false;
	} else {
		dev_dbg(&dev->dev, "graphics turbo enabled\n");
		ips->gpu_turbo_enabled = true;
	}

	/*
	 * Check PLATFORM_INFO MSR to make sure this chip is
	 * turbo capable.
	 */
	rdmsrl(PLATFORM_INFO, platform_info);
	if (!(platform_info & PLATFORM_TDP)) {
		dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
		return -ENODEV;
	}

	/*
	 * IRQ handler for ME interaction
	 * Note: don't use MSI here as the PCH has bugs.
	 */
	ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
	if (ret < 0)
		return ret;

	ips->irq = pci_irq_vector(dev, 0);

	ret = request_irq(ips->irq, ips_irq_handler, IRQF_SHARED, "ips", ips);
	if (ret) {
		dev_err(&dev->dev, "request irq failed, aborting\n");
		return ret;
	}

	/* Enable aux, hot & critical interrupts */
	thm_writeb(THM_TSPIEN, TSPIEN_AUX2_LOHI | TSPIEN_CRIT_LOHI |
		   TSPIEN_HOT_LOHI | TSPIEN_AUX_LOHI);
	thm_writeb(THM_TEN, TEN_UPDATE_EN);

	/* Collect adjustment values */
	ips->cta_val = thm_readw(THM_CTA);
	ips->pta_val = thm_readw(THM_PTA);
	ips->mgta_val = thm_readw(THM_MGTA);

	/* Save turbo limits & ratios */
	rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);

	ips_disable_cpu_turbo(ips);
	ips->cpu_turbo_enabled = false;

	/* Create thermal adjust thread */
	ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
	if (IS_ERR(ips->adjust)) {
		dev_err(&dev->dev,
			"failed to create thermal adjust thread, aborting\n");
		ret = -ENOMEM;
		goto error_free_irq;
	}

	/*
	 * The monitor thread collects the initial samples and then wakes
	 * the adjust thread, so it must be started last.
	 */
	ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
	if (IS_ERR(ips->monitor)) {
		dev_err(&dev->dev,
			"failed to create thermal monitor thread, aborting\n");
		ret = -ENOMEM;
		goto error_thread_cleanup;
	}

	/* Tell the firmware we're running and advertise our limits */
	hts = (ips->core_power_limit << HTS_PCPL_SHIFT) |
		(ips->mcp_temp_limit << HTS_PTL_SHIFT) | HTS_NVV;
	htshi = HTS2_PRST_RUNNING << HTS2_PRST_SHIFT;

	thm_writew(THM_HTSHI, htshi);
	thm_writel(THM_HTS, hts);

	ips_debugfs_init(ips);

	dev_info(&dev->dev, "IPS driver initialized, MCP temp limit %d\n",
		 ips->mcp_temp_limit);
	return ret;

error_thread_cleanup:
	kthread_stop(ips->adjust);
error_free_irq:
	free_irq(ips->irq, ips);
	pci_free_irq_vectors(dev);
	return ret;
}

static void ips_remove(struct pci_dev *dev)
{
	struct ips_driver *ips = pci_get_drvdata(dev);
	u64 turbo_override;

	if (!ips)
		return;

	ips_debugfs_cleanup(ips);

	/* Release i915 driver symbols */
	if (ips->read_mch_val)
		symbol_put(i915_read_mch_val);
	if (ips->gpu_raise)
		symbol_put(i915_gpu_raise);
	if (ips->gpu_lower)
		symbol_put(i915_gpu_lower);
	if (ips->gpu_busy)
		symbol_put(i915_gpu_busy);
	if (ips->gpu_turbo_disable)
		symbol_put(i915_gpu_turbo_disable);

	/* Restore the original turbo limits */
	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
	turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN);
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);

	free_irq(ips->irq, ips);
	pci_free_irq_vectors(dev);
	if (ips->adjust)
		kthread_stop(ips->adjust);
	if (ips->monitor)
		kthread_stop(ips->monitor);
	dev_dbg(&dev->dev, "IPS driver removed\n");
}

static struct pci_driver ips_pci_driver = {
	.name = "intel ips",
	.id_table = ips_id_table,
	.probe = ips_probe,
	.remove = ips_remove,
};

module_pci_driver(ips_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jesse Barnes <jbarnes@virtuousgeek.org>");
MODULE_DESCRIPTION("Intelligent Power Sharing Driver");