/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/kernel.h>
16#include <linux/kernel_stat.h>
17#include <linux/module.h>
18#include <linux/ktime.h>
19#include <linux/hrtimer.h>
20#include <linux/tick.h>
21#include <linux/slab.h>
22#include <linux/sched/cpufreq.h>
23#include <linux/list.h>
24#include <linux/cpu.h>
25#include <linux/cpufreq.h>
26#include <linux/sysfs.h>
27#include <linux/types.h>
28#include <linux/fs.h>
29#include <linux/debugfs.h>
30#include <linux/acpi.h>
31#include <linux/vmalloc.h>
32#include <trace/events/power.h>
33
34#include <asm/div64.h>
35#include <asm/msr.h>
36#include <asm/cpu_device_id.h>
37#include <asm/cpufeature.h>
38#include <asm/intel-family.h>
39
40#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
41
42#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
43#define INTEL_CPUFREQ_TRANSITION_DELAY 500
44
45#ifdef CONFIG_ACPI
46#include <acpi/processor.h>
47#include <acpi/cppc_acpi.h>
48#endif
49
50#define FRAC_BITS 8
51#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
52#define fp_toint(X) ((X) >> FRAC_BITS)
53
54#define EXT_BITS 6
55#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
56#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
57#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
58
59static inline int32_t mul_fp(int32_t x, int32_t y)
60{
61 return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
62}
63
64static inline int32_t div_fp(s64 x, s64 y)
65{
66 return div64_s64((int64_t)x << FRAC_BITS, y);
67}
68
69static inline int ceiling_fp(int32_t x)
70{
71 int mask, ret;
72
73 ret = fp_toint(x);
74 mask = (1 << FRAC_BITS) - 1;
75 if (x & mask)
76 ret += 1;
77 return ret;
78}
79
80static inline int32_t percent_fp(int percent)
81{
82 return div_fp(percent, 100);
83}
84
85static inline u64 mul_ext_fp(u64 x, u64 y)
86{
87 return (x * y) >> EXT_FRAC_BITS;
88}
89
90static inline u64 div_ext_fp(u64 x, u64 y)
91{
92 return div64_u64(x << EXT_FRAC_BITS, y);
93}
94
95static inline int32_t percent_ext_fp(int percent)
96{
97 return div_ext_fp(percent, 100);
98}
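
/*
 * Worked example of the fixed-point helpers above (values assumed for
 * illustration): with FRAC_BITS = 8, one integer unit is 256, so
 * int_tofp(75) = 19200 and percent_fp(75) = div_fp(75, 100) = 192,
 * i.e. 0.75 in fixed point.  mul_fp(percent_fp(75), int_tofp(2)) =
 * (192 * 512) >> 8 = 384 = 1.5, and fp_toint(384) = 1 (truncating).
 * The "ext" variants carry EXT_FRAC_BITS = 14 fraction bits, so
 * div_ext_fp(3, 4) = (3 << 14) / 4 = 12288 = 0.75 * 16384.
 */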
99
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during the last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate the next
 *			P state. This can differ from core_avg_perf to
 *			account for cpu idle periods
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference in time stamp counter between last and
 *			current sample
 * @time:		Time of the sample, as provided by the scheduler
 *			utilization update callback
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing the next P State.
 */
118struct sample {
119 int32_t core_avg_perf;
120 int32_t busy_scaled;
121 u64 aperf;
122 u64 mperf;
123 u64 tsc;
124 u64 time;
125};
126
/**
 * struct pstate_data -	Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max non-turbo P state possible for this platform
 * @max_pstate_physical:	Physical max P state for the processor; this can
 *			be higher than max_pstate, which can be limited by
 *			platform thermal design power limits
 * @scaling:		Scaling factor to convert a P state ratio to frequency
 *			in cpufreq units (kHz)
 * @turbo_pstate:	Max turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
143struct pstate_data {
144 int current_pstate;
145 int min_pstate;
146 int max_pstate;
147 int max_pstate_physical;
148 int scaling;
149 int turbo_pstate;
150 unsigned int max_freq;
151 unsigned int turbo_freq;
152};
153
/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P state
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) / (max P state -
 *			min P state)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used on Atom platforms, where in addition to the target P
 * state, the voltage must be specified to select the next P state.
 */
167struct vid_data {
168 int min;
169 int max;
170 int turbo;
171 int32_t ratio;
172};
173
/**
 * struct global_params - Global parameters, mostly tunable via sysfs.
 * @no_turbo:		Whether or not to use turbo P-states.
 * @turbo_disabled:	Whether or not turbo P-states are available at all,
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 */
186struct global_params {
187 bool no_turbo;
188 bool turbo_disabled;
189 int max_perf_pct;
190 int min_perf_pct;
191};
192
/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @last_update:	Time of the last update
 * @last_sample_time:	Last sample time
 * @aperf_mperf_shift:	Right shift applied to the MPERF delta when the
 *			APERF and MPERF MSRs count at different rates
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cumulative_iowait:	IO wait time difference between the last and
 *			current sample
 * @sample:		Storage for the last sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @iowait_boost:	iowait-related boost fraction
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB), when the
 *			policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power-on default HWP energy performance
 *			preference/bias
 * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
 *			operation
 *
 * This structure stores per CPU instance data for all CPUs.
 */
228struct cpudata {
229 int cpu;
230
231 unsigned int policy;
232 struct update_util_data update_util;
233 bool update_util_set;
234
235 struct pstate_data pstate;
236 struct vid_data vid;
237
238 u64 last_update;
239 u64 last_sample_time;
240 u64 aperf_mperf_shift;
241 u64 prev_aperf;
242 u64 prev_mperf;
243 u64 prev_tsc;
 u64 prev_cumulative_iowait;
245 struct sample sample;
246 int32_t min_perf_ratio;
247 int32_t max_perf_ratio;
248#ifdef CONFIG_ACPI
249 struct acpi_processor_performance acpi_perf_data;
250 bool valid_pss_table;
251#endif
252 unsigned int iowait_boost;
253 s16 epp_powersave;
254 s16 epp_policy;
255 s16 epp_default;
256 s16 epp_saved;
257};
258
259static struct cpudata **all_cpu_data;
260
/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_aperf_mperf_shift:	Callback to get the APERF/MPERF counting shift
 * @get_val:		Callback to convert a P state to the actual MSR value
 * @get_vid:		Callback to get VID data for Atom platforms
 *
 * Core and Atom CPU models have different ways to get P state limits. This
 * structure is used to store those callbacks.
 */
274struct pstate_funcs {
275 int (*get_max)(void);
276 int (*get_max_physical)(void);
277 int (*get_min)(void);
278 int (*get_turbo)(void);
279 int (*get_scaling)(void);
280 int (*get_aperf_mperf_shift)(void);
281 u64 (*get_val)(struct cpudata*, int pstate);
282 void (*get_vid)(struct cpudata *);
283};
284
285static struct pstate_funcs pstate_funcs __read_mostly;
286
287static int hwp_active __read_mostly;
288static bool per_cpu_limits __read_mostly;
289
290static struct cpufreq_driver *intel_pstate_driver __read_mostly;
291
292#ifdef CONFIG_ACPI
293static bool acpi_ppc;
294#endif
295
296static struct global_params global;
297
298static DEFINE_MUTEX(intel_pstate_driver_lock);
299static DEFINE_MUTEX(intel_pstate_limits_lock);
300
301#ifdef CONFIG_ACPI
302
303static bool intel_pstate_get_ppc_enable_status(void)
304{
305 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
306 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
307 return true;
308
309 return acpi_ppc;
310}
311
312#ifdef CONFIG_ACPI_CPPC_LIB
313
/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
316{
317 sched_set_itmt_support();
318}
319
static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
321
322static void intel_pstate_set_itmt_prio(int cpu)
323{
324 struct cppc_perf_caps cppc_perf;
325 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
326 int ret;
327
328 ret = cppc_get_perf_caps(cpu, &cppc_perf);
329 if (ret)
330 return;
331
 /*
  * The priorities can be set regardless of whether or not
  * sched_set_itmt_support(true) has been called and it is valid to
  * update them at any time after it has been called.
  */
337 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
338
339 if (max_highest_perf <= min_highest_perf) {
340 if (cppc_perf.highest_perf > max_highest_perf)
341 max_highest_perf = cppc_perf.highest_perf;
342
343 if (cppc_perf.highest_perf < min_highest_perf)
344 min_highest_perf = cppc_perf.highest_perf;
345
346 if (max_highest_perf > min_highest_perf) {
 /*
  * This code can be run during CPU online under the
  * CPU hotplug locks, so sched_set_itmt_support()
  * cannot be called from here. Queue up a work item
  * to invoke it.
  */
353 schedule_work(&sched_itmt_work);
354 }
355 }
356}
357#else
358static void intel_pstate_set_itmt_prio(int cpu)
359{
360}
361#endif
362
363static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
364{
365 struct cpudata *cpu;
366 int ret;
367 int i;
368
369 if (hwp_active) {
370 intel_pstate_set_itmt_prio(policy->cpu);
371 return;
372 }
373
374 if (!intel_pstate_get_ppc_enable_status())
375 return;
376
377 cpu = all_cpu_data[policy->cpu];
378
379 ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
380 policy->cpu);
381 if (ret)
382 return;
383
 /*
  * Check if the control value in _PSS is for the PERF_CTL MSR, which
  * should guarantee that it is valid to use for this CPU.
  */
389 if (cpu->acpi_perf_data.control_register.space_id !=
390 ACPI_ADR_SPACE_FIXED_HARDWARE)
391 goto err;
392
 /*
  * If there is only one entry in _PSS, simply ignore _PSS and continue
  * as usual without taking it into account.
  */
397 if (cpu->acpi_perf_data.state_count < 2)
398 goto err;
399
400 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
401 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
402 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
403 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
404 (u32) cpu->acpi_perf_data.states[i].core_frequency,
405 (u32) cpu->acpi_perf_data.states[i].power,
406 (u32) cpu->acpi_perf_data.states[i].control);
407 }
408
 /*
  * The _PSS table doesn't contain the whole turbo frequency range.
  * It just contains +1 MHz above the max non-turbo frequency,
  * with a control value corresponding to the max turbo ratio. But
  * when cpufreq set_policy is called, it will be called with this
  * max frequency, which will cause reduced performance because
  * this driver uses the real max turbo frequency as the max
  * frequency. So correct this entry in the _PSS table to the
  * real max turbo frequency based on the turbo state.
  * Also convert to MHz as _PSS frequencies are in MHz.
  */
420 if (!global.turbo_disabled)
421 cpu->acpi_perf_data.states[0].core_frequency =
422 policy->cpuinfo.max_freq / 1000;
423 cpu->valid_pss_table = true;
424 pr_debug("_PPC limits will be enforced\n");
425
426 return;
427
428 err:
429 cpu->valid_pss_table = false;
430 acpi_processor_unregister_performance(policy->cpu);
431}
432
433static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
434{
435 struct cpudata *cpu;
436
437 cpu = all_cpu_data[policy->cpu];
438 if (!cpu->valid_pss_table)
439 return;
440
441 acpi_processor_unregister_performance(policy->cpu);
442}
443#else
444static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
445{
446}
447
448static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
449{
450}
451#endif
452
453static inline void update_turbo_state(void)
454{
455 u64 misc_en;
456 struct cpudata *cpu;
457
458 cpu = all_cpu_data[0];
459 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
460 global.turbo_disabled =
461 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
462 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
463}
464
465static int min_perf_pct_min(void)
466{
467 struct cpudata *cpu = all_cpu_data[0];
468 int turbo_pstate = cpu->pstate.turbo_pstate;
469
470 return turbo_pstate ?
471 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
472}
473
474static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
475{
476 u64 epb;
477 int ret;
478
479 if (!static_cpu_has(X86_FEATURE_EPB))
480 return -ENXIO;
481
482 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
483 if (ret)
484 return (s16)ret;
485
486 return (s16)(epb & 0x0f);
487}
488
489static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
490{
491 s16 epp;
492
493 if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
 /*
  * When hwp_req_data is 0 the caller did not pass in a cached
  * MSR_HWP_REQUEST value, so read it here before extracting EPP.
  */
498 if (!hwp_req_data) {
499 epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
500 &hwp_req_data);
501 if (epp)
502 return epp;
503 }
504 epp = (hwp_req_data >> 24) & 0xff;
505 } else {
 /* When there is no EPP present, HWP uses EPB settings */
507 epp = intel_pstate_get_epb(cpu_data);
508 }
509
510 return epp;
511}
512
513static int intel_pstate_set_epb(int cpu, s16 pref)
514{
515 u64 epb;
516 int ret;
517
518 if (!static_cpu_has(X86_FEATURE_EPB))
519 return -ENXIO;
520
521 ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
522 if (ret)
523 return ret;
524
525 epb = (epb & ~0x0f) | pref;
526 wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
527
528 return 0;
529}
530
/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[] array:
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
542static const char * const energy_perf_strings[] = {
543 "default",
544 "performance",
545 "balance_performance",
546 "balance_power",
547 "power",
548 NULL
549};
550static const unsigned int epp_values[] = {
551 HWP_EPP_PERFORMANCE,
552 HWP_EPP_BALANCE_PERFORMANCE,
553 HWP_EPP_BALANCE_POWERSAVE,
554 HWP_EPP_POWERSAVE
555};
556
557static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
558{
559 s16 epp;
560 int index = -EINVAL;
561
562 epp = intel_pstate_get_epp(cpu_data, 0);
563 if (epp < 0)
564 return epp;
565
566 if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
567 if (epp == HWP_EPP_PERFORMANCE)
568 return 1;
569 if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
570 return 2;
571 if (epp <= HWP_EPP_BALANCE_POWERSAVE)
572 return 3;
573 else
574 return 4;
575 } else if (static_cpu_has(X86_FEATURE_EPB)) {
 /*
  * Range:
  *	0x00-0x03	:	Performance
  *	0x04-0x07	:	Balance performance
  *	0x08-0x0B	:	Balance power
  *	0x0C-0x0F	:	Power
  * The EPB is a 4 bit value, but our ranges restrict the
  * value which can be set. Here only the top two bits are
  * used effectively.
  */
586 index = (epp >> 2) + 1;
587 }
588
589 return index;
590}
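
/*
 * Example of the preference-index mapping above, assuming the standard
 * HWP_EPP_* values (0x00, 0x80, 0xC0, 0xFF): an EPP of 0 maps to index 1
 * ("performance"), 0x01-0x80 to index 2 ("balance_performance"),
 * 0x81-0xC0 to index 3 ("balance_power") and anything higher to index 4
 * ("power").  On EPB-only parts the 4-bit bias is compressed with
 * (epp >> 2) + 1, so e.g. a firmware default of 6 reads back as index 2
 * and 15 as index 4.
 */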
591
592static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
593 int pref_index)
594{
595 int epp = -EINVAL;
596 int ret;
597
598 if (!pref_index)
599 epp = cpu_data->epp_default;
600
601 mutex_lock(&intel_pstate_limits_lock);
602
603 if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
604 u64 value;
605
606 ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
607 if (ret)
608 goto return_pref;
609
610 value &= ~GENMASK_ULL(31, 24);
611
612 if (epp == -EINVAL)
613 epp = epp_values[pref_index - 1];
614
615 value |= (u64)epp << 24;
616 ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
617 } else {
618 if (epp == -EINVAL)
619 epp = (pref_index - 1) << 2;
620 ret = intel_pstate_set_epb(cpu_data->cpu, epp);
621 }
622return_pref:
623 mutex_unlock(&intel_pstate_limits_lock);
624
625 return ret;
626}
627
628static ssize_t show_energy_performance_available_preferences(
629 struct cpufreq_policy *policy, char *buf)
630{
631 int i = 0;
632 int ret = 0;
633
634 while (energy_perf_strings[i] != NULL)
635 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
636
637 ret += sprintf(&buf[ret], "\n");
638
639 return ret;
640}
641
642cpufreq_freq_attr_ro(energy_performance_available_preferences);
643
644static ssize_t store_energy_performance_preference(
645 struct cpufreq_policy *policy, const char *buf, size_t count)
646{
647 struct cpudata *cpu_data = all_cpu_data[policy->cpu];
648 char str_preference[21];
649 int ret, i = 0;
650
651 ret = sscanf(buf, "%20s", str_preference);
652 if (ret != 1)
653 return -EINVAL;
654
655 while (energy_perf_strings[i] != NULL) {
656 if (!strcmp(str_preference, energy_perf_strings[i])) {
657 intel_pstate_set_energy_pref_index(cpu_data, i);
658 return count;
659 }
660 ++i;
661 }
662
663 return -EINVAL;
664}
665
666static ssize_t show_energy_performance_preference(
667 struct cpufreq_policy *policy, char *buf)
668{
669 struct cpudata *cpu_data = all_cpu_data[policy->cpu];
670 int preference;
671
672 preference = intel_pstate_get_energy_pref_index(cpu_data);
673 if (preference < 0)
674 return preference;
675
676 return sprintf(buf, "%s\n", energy_perf_strings[preference]);
677}
678
679cpufreq_freq_attr_rw(energy_performance_preference);
680
681static struct freq_attr *hwp_cpufreq_attrs[] = {
682 &energy_performance_preference,
683 &energy_performance_available_preferences,
684 NULL,
685};
686
687static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
688 int *current_max)
689{
690 u64 cap;
691
692 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
693 if (global.no_turbo)
694 *current_max = HWP_GUARANTEED_PERF(cap);
695 else
696 *current_max = HWP_HIGHEST_PERF(cap);
697
698 *phy_max = HWP_HIGHEST_PERF(cap);
699}
700
701static void intel_pstate_hwp_set(unsigned int cpu)
702{
703 struct cpudata *cpu_data = all_cpu_data[cpu];
704 int max, min;
705 u64 value;
706 s16 epp;
707
708 max = cpu_data->max_perf_ratio;
709 min = cpu_data->min_perf_ratio;
710
711 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
712 min = max;
713
714 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
715
716 value &= ~HWP_MIN_PERF(~0L);
717 value |= HWP_MIN_PERF(min);
718
719 value &= ~HWP_MAX_PERF(~0L);
720 value |= HWP_MAX_PERF(max);
721
722 if (cpu_data->epp_policy == cpu_data->policy)
723 goto skip_epp;
724
725 cpu_data->epp_policy = cpu_data->policy;
726
727 if (cpu_data->epp_saved >= 0) {
728 epp = cpu_data->epp_saved;
729 cpu_data->epp_saved = -EINVAL;
730 goto update_epp;
731 }
732
733 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
734 epp = intel_pstate_get_epp(cpu_data, value);
735 cpu_data->epp_powersave = epp;
736
737 if (epp < 0)
738 goto skip_epp;
739
740 epp = 0;
741 } else {
 /* skip setting EPP when the saved value is invalid */
743 if (cpu_data->epp_powersave < 0)
744 goto skip_epp;
745
 /*
  * No need to restore EPP when it is not zero. This
  * means:
  *  - Policy is not changed
  *  - user has manually changed
  *  - Error reading EPB
  */
753 epp = intel_pstate_get_epp(cpu_data, value);
754 if (epp)
755 goto skip_epp;
756
757 epp = cpu_data->epp_powersave;
758 }
759update_epp:
760 if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
761 value &= ~GENMASK_ULL(31, 24);
762 value |= (u64)epp << 24;
763 } else {
764 intel_pstate_set_epb(cpu, epp);
765 }
766skip_epp:
767 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
768}
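
/*
 * Sketch of the MSR_HWP_REQUEST value written above, per the usual field
 * definitions (bits 7:0 minimum perf, 15:8 maximum perf, 31:24 EPP).
 * With assumed limits min = 8, max = 32 and EPP = 0x80 the register value
 * becomes 0x80002008, i.e. HWP_MIN_PERF(8) | HWP_MAX_PERF(32) |
 * (0x80ULL << 24).
 */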
769
770static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
771{
772 struct cpudata *cpu_data = all_cpu_data[policy->cpu];
773
774 if (!hwp_active)
775 return 0;
776
777 cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
778
779 return 0;
780}
781
782static int intel_pstate_resume(struct cpufreq_policy *policy)
783{
784 if (!hwp_active)
785 return 0;
786
787 mutex_lock(&intel_pstate_limits_lock);
788
789 all_cpu_data[policy->cpu]->epp_policy = 0;
790 intel_pstate_hwp_set(policy->cpu);
791
792 mutex_unlock(&intel_pstate_limits_lock);
793
794 return 0;
795}
796
797static void intel_pstate_update_policies(void)
798{
799 int cpu;
800
801 for_each_possible_cpu(cpu)
802 cpufreq_update_policy(cpu);
803}
804
805
806#define show_one(file_name, object) \
807 static ssize_t show_##file_name \
808 (struct kobject *kobj, struct attribute *attr, char *buf) \
809 { \
810 return sprintf(buf, "%u\n", global.object); \
811 }
812
813static ssize_t intel_pstate_show_status(char *buf);
814static int intel_pstate_update_status(const char *buf, size_t size);
815
816static ssize_t show_status(struct kobject *kobj,
817 struct attribute *attr, char *buf)
818{
819 ssize_t ret;
820
821 mutex_lock(&intel_pstate_driver_lock);
822 ret = intel_pstate_show_status(buf);
823 mutex_unlock(&intel_pstate_driver_lock);
824
825 return ret;
826}
827
828static ssize_t store_status(struct kobject *a, struct attribute *b,
829 const char *buf, size_t count)
830{
831 char *p = memchr(buf, '\n', count);
832 int ret;
833
834 mutex_lock(&intel_pstate_driver_lock);
835 ret = intel_pstate_update_status(buf, p ? p - buf : count);
836 mutex_unlock(&intel_pstate_driver_lock);
837
838 return ret < 0 ? ret : count;
839}
840
841static ssize_t show_turbo_pct(struct kobject *kobj,
842 struct attribute *attr, char *buf)
843{
844 struct cpudata *cpu;
845 int total, no_turbo, turbo_pct;
846 uint32_t turbo_fp;
847
848 mutex_lock(&intel_pstate_driver_lock);
849
850 if (!intel_pstate_driver) {
851 mutex_unlock(&intel_pstate_driver_lock);
852 return -EAGAIN;
853 }
854
855 cpu = all_cpu_data[0];
856
857 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
858 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
859 turbo_fp = div_fp(no_turbo, total);
860 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
861
862 mutex_unlock(&intel_pstate_driver_lock);
863
864 return sprintf(buf, "%u\n", turbo_pct);
865}
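
/*
 * Worked example for the turbo_pct computation above (assumed ratios):
 * with min_pstate = 8, max_pstate = 24 and turbo_pstate = 36 there are
 * total = 29 P-states of which no_turbo = 17 are non-turbo, so
 * turbo_fp = div_fp(17, 29) ~= 0.586 and turbo_pct = 100 - 58 = 42,
 * i.e. roughly 42% of the exposed P-state range is turbo-only.
 */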
866
867static ssize_t show_num_pstates(struct kobject *kobj,
868 struct attribute *attr, char *buf)
869{
870 struct cpudata *cpu;
871 int total;
872
873 mutex_lock(&intel_pstate_driver_lock);
874
875 if (!intel_pstate_driver) {
876 mutex_unlock(&intel_pstate_driver_lock);
877 return -EAGAIN;
878 }
879
880 cpu = all_cpu_data[0];
881 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
882
883 mutex_unlock(&intel_pstate_driver_lock);
884
885 return sprintf(buf, "%u\n", total);
886}
887
888static ssize_t show_no_turbo(struct kobject *kobj,
889 struct attribute *attr, char *buf)
890{
891 ssize_t ret;
892
893 mutex_lock(&intel_pstate_driver_lock);
894
895 if (!intel_pstate_driver) {
896 mutex_unlock(&intel_pstate_driver_lock);
897 return -EAGAIN;
898 }
899
900 update_turbo_state();
901 if (global.turbo_disabled)
902 ret = sprintf(buf, "%u\n", global.turbo_disabled);
903 else
904 ret = sprintf(buf, "%u\n", global.no_turbo);
905
906 mutex_unlock(&intel_pstate_driver_lock);
907
908 return ret;
909}
910
911static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
912 const char *buf, size_t count)
913{
914 unsigned int input;
915 int ret;
916
917 ret = sscanf(buf, "%u", &input);
918 if (ret != 1)
919 return -EINVAL;
920
921 mutex_lock(&intel_pstate_driver_lock);
922
923 if (!intel_pstate_driver) {
924 mutex_unlock(&intel_pstate_driver_lock);
925 return -EAGAIN;
926 }
927
928 mutex_lock(&intel_pstate_limits_lock);
929
930 update_turbo_state();
931 if (global.turbo_disabled) {
932 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
933 mutex_unlock(&intel_pstate_limits_lock);
934 mutex_unlock(&intel_pstate_driver_lock);
935 return -EPERM;
936 }
937
938 global.no_turbo = clamp_t(int, input, 0, 1);
939
940 if (global.no_turbo) {
941 struct cpudata *cpu = all_cpu_data[0];
942 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
943
 /* Squash the global minimum into the permitted range. */
945 if (global.min_perf_pct > pct)
946 global.min_perf_pct = pct;
947 }
948
949 mutex_unlock(&intel_pstate_limits_lock);
950
951 intel_pstate_update_policies();
952
953 mutex_unlock(&intel_pstate_driver_lock);
954
955 return count;
956}
957
958static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
959 const char *buf, size_t count)
960{
961 unsigned int input;
962 int ret;
963
964 ret = sscanf(buf, "%u", &input);
965 if (ret != 1)
966 return -EINVAL;
967
968 mutex_lock(&intel_pstate_driver_lock);
969
970 if (!intel_pstate_driver) {
971 mutex_unlock(&intel_pstate_driver_lock);
972 return -EAGAIN;
973 }
974
975 mutex_lock(&intel_pstate_limits_lock);
976
977 global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
978
979 mutex_unlock(&intel_pstate_limits_lock);
980
981 intel_pstate_update_policies();
982
983 mutex_unlock(&intel_pstate_driver_lock);
984
985 return count;
986}
987
988static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
989 const char *buf, size_t count)
990{
991 unsigned int input;
992 int ret;
993
994 ret = sscanf(buf, "%u", &input);
995 if (ret != 1)
996 return -EINVAL;
997
998 mutex_lock(&intel_pstate_driver_lock);
999
1000 if (!intel_pstate_driver) {
1001 mutex_unlock(&intel_pstate_driver_lock);
1002 return -EAGAIN;
1003 }
1004
1005 mutex_lock(&intel_pstate_limits_lock);
1006
1007 global.min_perf_pct = clamp_t(int, input,
1008 min_perf_pct_min(), global.max_perf_pct);
1009
1010 mutex_unlock(&intel_pstate_limits_lock);
1011
1012 intel_pstate_update_policies();
1013
1014 mutex_unlock(&intel_pstate_driver_lock);
1015
1016 return count;
1017}
1018
1019show_one(max_perf_pct, max_perf_pct);
1020show_one(min_perf_pct, min_perf_pct);
1021
1022define_one_global_rw(status);
1023define_one_global_rw(no_turbo);
1024define_one_global_rw(max_perf_pct);
1025define_one_global_rw(min_perf_pct);
1026define_one_global_ro(turbo_pct);
1027define_one_global_ro(num_pstates);
1028
1029static struct attribute *intel_pstate_attributes[] = {
1030 &status.attr,
1031 &no_turbo.attr,
1032 &turbo_pct.attr,
1033 &num_pstates.attr,
1034 NULL
1035};
1036
1037static const struct attribute_group intel_pstate_attr_group = {
1038 .attrs = intel_pstate_attributes,
1039};
1040
1041static void __init intel_pstate_sysfs_expose_params(void)
1042{
1043 struct kobject *intel_pstate_kobject;
1044 int rc;
1045
1046 intel_pstate_kobject = kobject_create_and_add("intel_pstate",
1047 &cpu_subsys.dev_root->kobj);
1048 if (WARN_ON(!intel_pstate_kobject))
1049 return;
1050
1051 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
1052 if (WARN_ON(rc))
1053 return;
1054
 /*
  * If per cpu limits are enforced there are no global limits, so
  * return without creating the max/min_perf_pct attributes.
  */
1059 if (per_cpu_limits)
1060 return;
1061
1062 rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
1063 WARN_ON(rc);
1064
1065 rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
1066 WARN_ON(rc);
1067
1068}
1069
1070
1071static void intel_pstate_hwp_enable(struct cpudata *cpudata)
1072{
 /* First disable HWP notification interrupt as we don't process them */
1074 if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
1075 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
1076
1077 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
1078 cpudata->epp_policy = 0;
1079 if (cpudata->epp_default == -EINVAL)
1080 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
1081}
1082
1083#define MSR_IA32_POWER_CTL_BIT_EE 19
1084
1085
1086static void intel_pstate_disable_ee(int cpu)
1087{
1088 u64 power_ctl;
1089 int ret;
1090
1091 ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
1092 if (ret)
1093 return;
1094
1095 if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
1096 pr_info("Disabling energy efficiency optimization\n");
1097 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
1098 wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
1099 }
1100}
1101
1102static int atom_get_min_pstate(void)
1103{
1104 u64 value;
1105
1106 rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1107 return (value >> 8) & 0x7F;
1108}
1109
1110static int atom_get_max_pstate(void)
1111{
1112 u64 value;
1113
1114 rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1115 return (value >> 16) & 0x7F;
1116}
1117
1118static int atom_get_turbo_pstate(void)
1119{
1120 u64 value;
1121
1122 rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
1123 return value & 0x7F;
1124}
1125
1126static u64 atom_get_val(struct cpudata *cpudata, int pstate)
1127{
1128 u64 val;
1129 int32_t vid_fp;
1130 u32 vid;
1131
1132 val = (u64)pstate << 8;
1133 if (global.no_turbo && !global.turbo_disabled)
1134 val |= (u64)1 << 32;
1135
1136 vid_fp = cpudata->vid.min + mul_fp(
1137 int_tofp(pstate - cpudata->pstate.min_pstate),
1138 cpudata->vid.ratio);
1139
1140 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
1141 vid = ceiling_fp(vid_fp);
1142
1143 if (pstate > cpudata->pstate.max_pstate)
1144 vid = cpudata->vid.turbo;
1145
1146 return val | vid;
1147}
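
/*
 * Worked example of the VID interpolation above (assumed calibration
 * data): with vid.min = int_tofp(40), vid.max = int_tofp(80),
 * min_pstate = 6 and max_pstate = 26, vid.ratio works out to 2.0 in
 * fixed point.  Requesting pstate 16 then gives vid_fp = int_tofp(40) +
 * mul_fp(int_tofp(10), ratio) = int_tofp(60), i.e. VID 60, exactly half
 * way along the line between the two calibration points, and the MSR
 * value is (16 << 8) | 60 = 0x103c.
 */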
1148
1149static int silvermont_get_scaling(void)
1150{
1151 u64 value;
1152 int i;
1153
1154 static int silvermont_freq_table[] = {
1155 83300, 100000, 133300, 116700, 80000};
1156
1157 rdmsrl(MSR_FSB_FREQ, value);
1158 i = value & 0x7;
1159 WARN_ON(i > 4);
1160
1161 return silvermont_freq_table[i];
1162}
1163
1164static int airmont_get_scaling(void)
1165{
1166 u64 value;
1167 int i;
1168
1169 static int airmont_freq_table[] = {
1170 83300, 100000, 133300, 116700, 80000,
1171 93300, 90000, 88900, 87500};
1172
1173 rdmsrl(MSR_FSB_FREQ, value);
1174 i = value & 0xF;
1175 WARN_ON(i > 8);
1176
1177 return airmont_freq_table[i];
1178}
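
/*
 * Example of how the scaling factor is used (table entry assumed to match
 * the BIOS-programmed bus clock): if MSR_FSB_FREQ selects the 100000 kHz
 * entry, a P-state ratio of 20 corresponds to 20 * 100000 kHz = 2 GHz,
 * which is the unit cpufreq sees in pstate.max_freq and friends.
 */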
1179
1180static void atom_get_vid(struct cpudata *cpudata)
1181{
1182 u64 value;
1183
1184 rdmsrl(MSR_ATOM_CORE_VIDS, value);
1185 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
1186 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
1187 cpudata->vid.ratio = div_fp(
1188 cpudata->vid.max - cpudata->vid.min,
1189 int_tofp(cpudata->pstate.max_pstate -
1190 cpudata->pstate.min_pstate));
1191
1192 rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
1193 cpudata->vid.turbo = value & 0x7f;
1194}
1195
1196static int core_get_min_pstate(void)
1197{
1198 u64 value;
1199
1200 rdmsrl(MSR_PLATFORM_INFO, value);
1201 return (value >> 40) & 0xFF;
1202}
1203
1204static int core_get_max_pstate_physical(void)
1205{
1206 u64 value;
1207
1208 rdmsrl(MSR_PLATFORM_INFO, value);
1209 return (value >> 8) & 0xFF;
1210}
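
/*
 * Example MSR_PLATFORM_INFO decode for the two helpers above (assumed
 * register contents): if bits 15:8 hold 0x24 and bits 47:40 hold 0x08,
 * core_get_max_pstate_physical() returns 36 (3.6 GHz at the 100 MHz core
 * scaling) and core_get_min_pstate() returns 8 (800 MHz).
 */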
1211
1212static int core_get_tdp_ratio(u64 plat_info)
1213{
1214
1215 if (plat_info & 0x600000000) {
1216 u64 tdp_ctrl;
1217 u64 tdp_ratio;
1218 int tdp_msr;
1219 int err;
1220
1221
1222 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
1223 if (err)
1224 return err;
1225
1226
1227 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
1228 err = rdmsrl_safe(tdp_msr, &tdp_ratio);
1229 if (err)
1230 return err;
1231
1232
1233 if (tdp_ctrl & 0x03)
1234 tdp_ratio >>= 16;
1235
1236 tdp_ratio &= 0xff;
1237 pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
1238
1239 return (int)tdp_ratio;
1240 }
1241
1242 return -ENXIO;
1243}
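
/*
 * Sketch of the configurable-TDP path above with assumed MSR contents:
 * if PLATFORM_INFO bits 33/34 report extra TDP levels and
 * MSR_CONFIG_TDP_CONTROL selects level 1, the ratio is read from
 * MSR_CONFIG_TDP_NOMINAL + 1; a raw value of 0x00180050 then yields
 * tdp_ratio = (0x00180050 >> 16) & 0xff = 0x18, i.e. P-state 24.
 */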
1244
1245static int core_get_max_pstate(void)
1246{
1247 u64 tar;
1248 u64 plat_info;
1249 int max_pstate;
1250 int tdp_ratio;
1251 int err;
1252
1253 rdmsrl(MSR_PLATFORM_INFO, plat_info);
1254 max_pstate = (plat_info >> 8) & 0xFF;
1255
1256 tdp_ratio = core_get_tdp_ratio(plat_info);
1257 if (tdp_ratio <= 0)
1258 return max_pstate;
1259
1260 if (hwp_active) {
1261
1262 return tdp_ratio;
1263 }
1264
1265 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
1266 if (!err) {
1267 int tar_levels;
1268
1269
1270 tar_levels = tar & 0xff;
1271 if (tdp_ratio - 1 == tar_levels) {
1272 max_pstate = tar_levels;
1273 pr_debug("max_pstate=TAC %x\n", max_pstate);
1274 }
1275 }
1276
1277 return max_pstate;
1278}
1279
1280static int core_get_turbo_pstate(void)
1281{
1282 u64 value;
1283 int nont, ret;
1284
1285 rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
1286 nont = core_get_max_pstate();
1287 ret = (value) & 255;
1288 if (ret <= nont)
1289 ret = nont;
1290 return ret;
1291}
1292
1293static inline int core_get_scaling(void)
1294{
1295 return 100000;
1296}
1297
1298static u64 core_get_val(struct cpudata *cpudata, int pstate)
1299{
1300 u64 val;
1301
1302 val = (u64)pstate << 8;
1303 if (global.no_turbo && !global.turbo_disabled)
1304 val |= (u64)1 << 32;
1305
1306 return val;
1307}
1308
1309static int knl_get_aperf_mperf_shift(void)
1310{
1311 return 10;
1312}
1313
1314static int knl_get_turbo_pstate(void)
1315{
1316 u64 value;
1317 int nont, ret;
1318
1319 rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
1320 nont = core_get_max_pstate();
1321 ret = (((value) >> 8) & 0xFF);
1322 if (ret <= nont)
1323 ret = nont;
1324 return ret;
1325}
1326
1327static int intel_pstate_get_base_pstate(struct cpudata *cpu)
1328{
1329 return global.no_turbo || global.turbo_disabled ?
1330 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1331}
1332
1333static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
1334{
1335 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
1336 cpu->pstate.current_pstate = pstate;
1337
 /*
  * Generally, there is no guarantee that this code will always run on
  * the CPU being updated, so force the register update to run on the
  * right CPU.
  */
1342 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
1343 pstate_funcs.get_val(cpu, pstate));
1344}
1345
1346static void intel_pstate_set_min_pstate(struct cpudata *cpu)
1347{
1348 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
1349}
1350
1351static void intel_pstate_max_within_limits(struct cpudata *cpu)
1352{
1353 int pstate;
1354
1355 update_turbo_state();
1356 pstate = intel_pstate_get_base_pstate(cpu);
1357 pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
1358 intel_pstate_set_pstate(cpu, pstate);
1359}
1360
1361static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1362{
1363 cpu->pstate.min_pstate = pstate_funcs.get_min();
1364 cpu->pstate.max_pstate = pstate_funcs.get_max();
1365 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
1366 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
1367 cpu->pstate.scaling = pstate_funcs.get_scaling();
1368 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
1369 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1370
1371 if (pstate_funcs.get_aperf_mperf_shift)
1372 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
1373
1374 if (pstate_funcs.get_vid)
1375 pstate_funcs.get_vid(cpu);
1376
1377 intel_pstate_set_min_pstate(cpu);
1378}
1379
1380static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
1381{
1382 struct sample *sample = &cpu->sample;
1383
1384 sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
1385}
1386
1387static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
1388{
1389 u64 aperf, mperf;
1390 unsigned long flags;
1391 u64 tsc;
1392
1393 local_irq_save(flags);
1394 rdmsrl(MSR_IA32_APERF, aperf);
1395 rdmsrl(MSR_IA32_MPERF, mperf);
1396 tsc = rdtsc();
1397 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
1398 local_irq_restore(flags);
1399 return false;
1400 }
1401 local_irq_restore(flags);
1402
1403 cpu->last_sample_time = cpu->sample.time;
1404 cpu->sample.time = time;
1405 cpu->sample.aperf = aperf;
1406 cpu->sample.mperf = mperf;
1407 cpu->sample.tsc = tsc;
1408 cpu->sample.aperf -= cpu->prev_aperf;
1409 cpu->sample.mperf -= cpu->prev_mperf;
1410 cpu->sample.tsc -= cpu->prev_tsc;
1411
1412 cpu->prev_aperf = aperf;
1413 cpu->prev_mperf = mperf;
1414 cpu->prev_tsc = tsc;
1415
 /*
  * The first time this function is invoked in a given cycle, all of the
  * previous sample data fields are equal to zero or stale and they must
  * be populated with meaningful numbers for things to work, so assume
  * that sample.time will always be reset before setting the utilization
  * update hook and make the caller skip the sample then.
  */
1422 if (cpu->last_sample_time) {
1423 intel_pstate_calc_avg_perf(cpu);
1424 return true;
1425 }
1426 return false;
1427}
1428
1429static inline int32_t get_avg_frequency(struct cpudata *cpu)
1430{
1431 return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
1432}
1433
1434static inline int32_t get_avg_pstate(struct cpudata *cpu)
1435{
1436 return mul_ext_fp(cpu->pstate.max_pstate_physical,
1437 cpu->sample.core_avg_perf);
1438}
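
/*
 * Worked example for the average-performance helpers above (assumed
 * sample deltas): with aperf = 8,000,000 and mperf = 10,000,000 over the
 * sample period, core_avg_perf = div_ext_fp(8000000, 10000000) ~= 0.8 in
 * extended fixed point (13107 / 16384).  With cpu_khz = 2,600,000 that
 * makes get_avg_frequency() about 2,080,000 kHz, and with
 * max_pstate_physical = 26 get_avg_pstate() returns 20.
 */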
1439
1440static inline int32_t get_target_pstate(struct cpudata *cpu)
1441{
1442 struct sample *sample = &cpu->sample;
1443 int32_t busy_frac, boost;
1444 int target, avg_pstate;
1445
1446 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
1447 sample->tsc);
1448
1449 boost = cpu->iowait_boost;
1450 cpu->iowait_boost >>= 1;
1451
1452 if (busy_frac < boost)
1453 busy_frac = boost;
1454
1455 sample->busy_scaled = busy_frac * 100;
1456
1457 target = global.no_turbo || global.turbo_disabled ?
1458 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1459 target += target >> 2;
1460 target = mul_fp(target, busy_frac);
1461 if (target < cpu->pstate.min_pstate)
1462 target = cpu->pstate.min_pstate;
1463
 /*
  * If the average P-state during the previous cycle was higher than the
  * current target, add 50% of the difference to the target to reduce
  * possible performance oscillations and offset possible performance
  * loss related to moving the workload from one CPU to another within
  * a package/module.
  */
1471 avg_pstate = get_avg_pstate(cpu);
1472 if (avg_pstate > target)
1473 target += (avg_pstate - target) >> 1;
1474
1475 return target;
1476}
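
/*
 * Worked example of the target computation above (assumed numbers):
 * with mperf = 6,000,000, tsc = 10,000,000 and no APERF/MPERF shift,
 * busy_frac = 0.6 (153/256) and busy_scaled comes out at roughly 59%.
 * For turbo_pstate = 32 the boosted reference is 32 + 32/4 = 40, so
 * target = mul_fp(40, 153) = 23.  If the average P-state over the last
 * period was 28, half of the difference is added back, giving 25.
 */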
1477
1478static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
1479{
1480 int max_pstate = intel_pstate_get_base_pstate(cpu);
1481 int min_pstate;
1482
1483 min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
1484 max_pstate = max(min_pstate, cpu->max_perf_ratio);
1485 return clamp_t(int, pstate, min_pstate, max_pstate);
1486}
1487
1488static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
1489{
1490 if (pstate == cpu->pstate.current_pstate)
1491 return;
1492
1493 cpu->pstate.current_pstate = pstate;
1494 wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
1495}
1496
1497static void intel_pstate_adjust_pstate(struct cpudata *cpu)
1498{
1499 int from = cpu->pstate.current_pstate;
1500 struct sample *sample;
1501 int target_pstate;
1502
1503 update_turbo_state();
1504
1505 target_pstate = get_target_pstate(cpu);
1506 target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
1507 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
1508 intel_pstate_update_pstate(cpu, target_pstate);
1509
1510 sample = &cpu->sample;
1511 trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
1512 fp_toint(sample->busy_scaled),
1513 from,
1514 cpu->pstate.current_pstate,
1515 sample->mperf,
1516 sample->aperf,
1517 sample->tsc,
1518 get_avg_frequency(cpu),
1519 fp_toint(cpu->iowait_boost * 100));
1520}
1521
1522static void intel_pstate_update_util(struct update_util_data *data, u64 time,
1523 unsigned int flags)
1524{
1525 struct cpudata *cpu = container_of(data, struct cpudata, update_util);
1526 u64 delta_ns;
1527
 /* Don't allow remote callbacks */
1529 if (smp_processor_id() != cpu->cpu)
1530 return;
1531
1532 if (flags & SCHED_CPUFREQ_IOWAIT) {
1533 cpu->iowait_boost = int_tofp(1);
1534 cpu->last_update = time;
1535
 /*
  * The last time the busy was 100% so P-state was max anyway
  * so avoid overhead of computation.
  */
1539 if (fp_toint(cpu->sample.busy_scaled) == 100)
1540 return;
1541
1542 goto set_pstate;
1543 } else if (cpu->iowait_boost) {
 /* Clear iowait_boost if the CPU may have been idle. */
1545 delta_ns = time - cpu->last_update;
1546 if (delta_ns > TICK_NSEC)
1547 cpu->iowait_boost = 0;
1548 }
1549 cpu->last_update = time;
1550 delta_ns = time - cpu->sample.time;
1551 if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
1552 return;
1553
1554set_pstate:
1555 if (intel_pstate_sample(cpu, time))
1556 intel_pstate_adjust_pstate(cpu);
1557}
1558
1559static struct pstate_funcs core_funcs = {
1560 .get_max = core_get_max_pstate,
1561 .get_max_physical = core_get_max_pstate_physical,
1562 .get_min = core_get_min_pstate,
1563 .get_turbo = core_get_turbo_pstate,
1564 .get_scaling = core_get_scaling,
1565 .get_val = core_get_val,
1566};
1567
1568static const struct pstate_funcs silvermont_funcs = {
1569 .get_max = atom_get_max_pstate,
1570 .get_max_physical = atom_get_max_pstate,
1571 .get_min = atom_get_min_pstate,
1572 .get_turbo = atom_get_turbo_pstate,
1573 .get_val = atom_get_val,
1574 .get_scaling = silvermont_get_scaling,
1575 .get_vid = atom_get_vid,
1576};
1577
1578static const struct pstate_funcs airmont_funcs = {
1579 .get_max = atom_get_max_pstate,
1580 .get_max_physical = atom_get_max_pstate,
1581 .get_min = atom_get_min_pstate,
1582 .get_turbo = atom_get_turbo_pstate,
1583 .get_val = atom_get_val,
1584 .get_scaling = airmont_get_scaling,
1585 .get_vid = atom_get_vid,
1586};
1587
1588static const struct pstate_funcs knl_funcs = {
1589 .get_max = core_get_max_pstate,
1590 .get_max_physical = core_get_max_pstate_physical,
1591 .get_min = core_get_min_pstate,
1592 .get_turbo = knl_get_turbo_pstate,
1593 .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
1594 .get_scaling = core_get_scaling,
1595 .get_val = core_get_val,
1596};
1597
1598static const struct pstate_funcs bxt_funcs = {
1599 .get_max = core_get_max_pstate,
1600 .get_max_physical = core_get_max_pstate_physical,
1601 .get_min = core_get_min_pstate,
1602 .get_turbo = core_get_turbo_pstate,
1603 .get_scaling = core_get_scaling,
1604 .get_val = core_get_val,
1605};
1606
1607#define ICPU(model, policy) \
1608 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
1609 (unsigned long)&policy }
1610
1611static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1612 ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
1613 ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs),
1614 ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs),
1615 ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs),
1616 ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs),
1617 ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs),
1618 ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs),
1619 ICPU(INTEL_FAM6_HASWELL_X, core_funcs),
1620 ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs),
1621 ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs),
1622 ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs),
1623 ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs),
1624 ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs),
1625 ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
1626 ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs),
1627 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
1628 ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
1629 ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
1630 ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs),
1631 ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs),
1632 {}
1633};
1634MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
1635
1636static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
1637 ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
1638 ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
1639 ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
1640 {}
1641};
1642
1643static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
1644 ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs),
1645 {}
1646};
1647
1648static int intel_pstate_init_cpu(unsigned int cpunum)
1649{
1650 struct cpudata *cpu;
1651
1652 cpu = all_cpu_data[cpunum];
1653
1654 if (!cpu) {
1655 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
1656 if (!cpu)
1657 return -ENOMEM;
1658
1659 all_cpu_data[cpunum] = cpu;
1660
1661 cpu->epp_default = -EINVAL;
1662 cpu->epp_powersave = -EINVAL;
1663 cpu->epp_saved = -EINVAL;
1664 }
1665
1666 cpu = all_cpu_data[cpunum];
1667
1668 cpu->cpu = cpunum;
1669
1670 if (hwp_active) {
1671 const struct x86_cpu_id *id;
1672
1673 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
1674 if (id)
1675 intel_pstate_disable_ee(cpunum);
1676
1677 intel_pstate_hwp_enable(cpu);
1678 }
1679
1680 intel_pstate_get_cpu_pstates(cpu);
1681
1682 pr_debug("controlling: cpu %d\n", cpunum);
1683
1684 return 0;
1685}
1686
1687static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
1688{
1689 struct cpudata *cpu = all_cpu_data[cpu_num];
1690
1691 if (hwp_active)
1692 return;
1693
1694 if (cpu->update_util_set)
1695 return;
1696
 /* Prevent intel_pstate_update_util() from using stale data. */
1698 cpu->sample.time = 0;
1699 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
1700 intel_pstate_update_util);
1701 cpu->update_util_set = true;
1702}
1703
1704static void intel_pstate_clear_update_util_hook(unsigned int cpu)
1705{
1706 struct cpudata *cpu_data = all_cpu_data[cpu];
1707
1708 if (!cpu_data->update_util_set)
1709 return;
1710
1711 cpufreq_remove_update_util_hook(cpu);
1712 cpu_data->update_util_set = false;
1713 synchronize_sched();
1714}
1715
1716static int intel_pstate_get_max_freq(struct cpudata *cpu)
1717{
1718 return global.turbo_disabled || global.no_turbo ?
1719 cpu->pstate.max_freq : cpu->pstate.turbo_freq;
1720}
1721
1722static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
1723 struct cpudata *cpu)
1724{
1725 int max_freq = intel_pstate_get_max_freq(cpu);
1726 int32_t max_policy_perf, min_policy_perf;
1727 int max_state, turbo_max;
1728
 /*
  * HWP needs some special consideration, because on BDX the
  * HWP_REQUEST uses abstract values to represent performance
  * rather than pure ratios.
  */
1734 if (hwp_active) {
1735 intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
1736 } else {
1737 max_state = intel_pstate_get_base_pstate(cpu);
1738 turbo_max = cpu->pstate.turbo_pstate;
1739 }
1740
1741 max_policy_perf = max_state * policy->max / max_freq;
1742 if (policy->max == policy->min) {
1743 min_policy_perf = max_policy_perf;
1744 } else {
1745 min_policy_perf = max_state * policy->min / max_freq;
1746 min_policy_perf = clamp_t(int32_t, min_policy_perf,
1747 0, max_policy_perf);
1748 }
1749
1750 pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
1751 policy->cpu, max_state,
1752 min_policy_perf, max_policy_perf);
1753
1754
1755 if (per_cpu_limits) {
1756 cpu->min_perf_ratio = min_policy_perf;
1757 cpu->max_perf_ratio = max_policy_perf;
1758 } else {
1759 int32_t global_min, global_max;
1760
1761
1762 global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
1763 global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
1764 global_min = clamp_t(int32_t, global_min, 0, global_max);
1765
1766 pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
1767 global_min, global_max);
1768
1769 cpu->min_perf_ratio = max(min_policy_perf, global_min);
1770 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
1771 cpu->max_perf_ratio = min(max_policy_perf, global_max);
1772 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
1773
1774
1775 cpu->min_perf_ratio = min(cpu->min_perf_ratio,
1776 cpu->max_perf_ratio);
1777
1778 }
1779 pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
1780 cpu->max_perf_ratio,
1781 cpu->min_perf_ratio);
1782}
1783
1784static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1785{
1786 struct cpudata *cpu;
1787
1788 if (!policy->cpuinfo.max_freq)
1789 return -ENODEV;
1790
1791 pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
1792 policy->cpuinfo.max_freq, policy->max);
1793
1794 cpu = all_cpu_data[policy->cpu];
1795 cpu->policy = policy->policy;
1796
1797 mutex_lock(&intel_pstate_limits_lock);
1798
1799 intel_pstate_update_perf_limits(policy, cpu);
1800
1801 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
 /*
  * NOHZ_FULL CPUs need this as the governor callback may not
  * be invoked on them.
  */
1806 intel_pstate_clear_update_util_hook(policy->cpu);
1807 intel_pstate_max_within_limits(cpu);
1808 } else {
1809 intel_pstate_set_update_util_hook(policy->cpu);
1810 }
1811
1812 if (hwp_active)
1813 intel_pstate_hwp_set(policy->cpu);
1814
1815 mutex_unlock(&intel_pstate_limits_lock);
1816
1817 return 0;
1818}
1819
1820static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
1821 struct cpudata *cpu)
1822{
1823 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
1824 policy->max < policy->cpuinfo.max_freq &&
1825 policy->max > cpu->pstate.max_freq) {
1826 pr_debug("policy->max > max non turbo frequency\n");
1827 policy->max = policy->cpuinfo.max_freq;
1828 }
1829}
1830
1831static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
1832{
1833 struct cpudata *cpu = all_cpu_data[policy->cpu];
1834
1835 update_turbo_state();
1836 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
1837 intel_pstate_get_max_freq(cpu));
1838
1839 if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
1840 policy->policy != CPUFREQ_POLICY_PERFORMANCE)
1841 return -EINVAL;
1842
1843 intel_pstate_adjust_policy_max(policy, cpu);
1844
1845 return 0;
1846}
1847
1848static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
1849{
1850 intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
1851}
1852
1853static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
1854{
1855 pr_debug("CPU %d exiting\n", policy->cpu);
1856
1857 intel_pstate_clear_update_util_hook(policy->cpu);
1858 if (hwp_active)
1859 intel_pstate_hwp_save_state(policy);
1860 else
1861 intel_cpufreq_stop_cpu(policy);
1862}
1863
1864static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
1865{
1866 intel_pstate_exit_perf_limits(policy);
1867
1868 policy->fast_switch_possible = false;
1869
1870 return 0;
1871}
1872
1873static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
1874{
1875 struct cpudata *cpu;
1876 int rc;
1877
1878 rc = intel_pstate_init_cpu(policy->cpu);
1879 if (rc)
1880 return rc;
1881
1882 cpu = all_cpu_data[policy->cpu];
1883
1884 cpu->max_perf_ratio = 0xFF;
1885 cpu->min_perf_ratio = 0;
1886
1887 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
1888 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1889
 /* cpuinfo and default policy values */
1891 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
1892 update_turbo_state();
1893 policy->cpuinfo.max_freq = global.turbo_disabled ?
1894 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1895 policy->cpuinfo.max_freq *= cpu->pstate.scaling;
1896
1897 intel_pstate_init_acpi_perf_limits(policy);
1898
1899 policy->fast_switch_possible = true;
1900
1901 return 0;
1902}
1903
1904static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
1905{
1906 int ret = __intel_pstate_cpu_init(policy);
1907
1908 if (ret)
1909 return ret;
1910
1911 if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
1912 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
1913 else
1914 policy->policy = CPUFREQ_POLICY_POWERSAVE;
1915
1916 return 0;
1917}
1918
1919static struct cpufreq_driver intel_pstate = {
1920 .flags = CPUFREQ_CONST_LOOPS,
1921 .verify = intel_pstate_verify_policy,
1922 .setpolicy = intel_pstate_set_policy,
1923 .suspend = intel_pstate_hwp_save_state,
1924 .resume = intel_pstate_resume,
1925 .init = intel_pstate_cpu_init,
1926 .exit = intel_pstate_cpu_exit,
1927 .stop_cpu = intel_pstate_stop_cpu,
1928 .name = "intel_pstate",
1929};
1930
1931static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
1932{
1933 struct cpudata *cpu = all_cpu_data[policy->cpu];
1934
1935 update_turbo_state();
1936 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
1937 intel_pstate_get_max_freq(cpu));
1938
1939 intel_pstate_adjust_policy_max(policy, cpu);
1940
1941 intel_pstate_update_perf_limits(policy, cpu);
1942
1943 return 0;
1944}
1945
1946static int intel_cpufreq_target(struct cpufreq_policy *policy,
1947 unsigned int target_freq,
1948 unsigned int relation)
1949{
1950 struct cpudata *cpu = all_cpu_data[policy->cpu];
1951 struct cpufreq_freqs freqs;
1952 int target_pstate;
1953
1954 update_turbo_state();
1955
1956 freqs.old = policy->cur;
1957 freqs.new = target_freq;
1958
1959 cpufreq_freq_transition_begin(policy, &freqs);
1960 switch (relation) {
1961 case CPUFREQ_RELATION_L:
1962 target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
1963 break;
1964 case CPUFREQ_RELATION_H:
1965 target_pstate = freqs.new / cpu->pstate.scaling;
1966 break;
1967 default:
1968 target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
1969 break;
1970 }
1971 target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
1972 if (target_pstate != cpu->pstate.current_pstate) {
1973 cpu->pstate.current_pstate = target_pstate;
1974 wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
1975 pstate_funcs.get_val(cpu, target_pstate));
1976 }
1977 freqs.new = target_pstate * cpu->pstate.scaling;
1978 cpufreq_freq_transition_end(policy, &freqs, false);
1979
1980 return 0;
1981}
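
/*
 * Example of the relation-dependent rounding above (assumed request):
 * for target_freq = 2,650,000 kHz and scaling = 100,000,
 * CPUFREQ_RELATION_L rounds up to P-state 27, CPUFREQ_RELATION_H rounds
 * down to 26 and the default case picks the closest, 27.  The frequency
 * reported back is always target_pstate * scaling, here 2.7 GHz for the
 * rounded-up cases.
 */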
1982
1983static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
1984 unsigned int target_freq)
1985{
1986 struct cpudata *cpu = all_cpu_data[policy->cpu];
1987 int target_pstate;
1988
1989 update_turbo_state();
1990
1991 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
1992 target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
1993 intel_pstate_update_pstate(cpu, target_pstate);
1994 return target_pstate * cpu->pstate.scaling;
1995}
1996
1997static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
1998{
1999 int ret = __intel_pstate_cpu_init(policy);
2000
2001 if (ret)
2002 return ret;
2003
2004 policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
2005 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
2006
2007 policy->cur = policy->cpuinfo.min_freq;
2008
2009 return 0;
2010}
2011
2012static struct cpufreq_driver intel_cpufreq = {
2013 .flags = CPUFREQ_CONST_LOOPS,
2014 .verify = intel_cpufreq_verify_policy,
2015 .target = intel_cpufreq_target,
2016 .fast_switch = intel_cpufreq_fast_switch,
2017 .init = intel_cpufreq_cpu_init,
2018 .exit = intel_pstate_cpu_exit,
2019 .stop_cpu = intel_cpufreq_stop_cpu,
2020 .name = "intel_cpufreq",
2021};
2022
2023static struct cpufreq_driver *default_driver = &intel_pstate;
2024
2025static void intel_pstate_driver_cleanup(void)
2026{
2027 unsigned int cpu;
2028
2029 get_online_cpus();
2030 for_each_online_cpu(cpu) {
2031 if (all_cpu_data[cpu]) {
2032 if (intel_pstate_driver == &intel_pstate)
2033 intel_pstate_clear_update_util_hook(cpu);
2034
2035 kfree(all_cpu_data[cpu]);
2036 all_cpu_data[cpu] = NULL;
2037 }
2038 }
2039 put_online_cpus();
2040 intel_pstate_driver = NULL;
2041}
2042
2043static int intel_pstate_register_driver(struct cpufreq_driver *driver)
2044{
2045 int ret;
2046
2047 memset(&global, 0, sizeof(global));
2048 global.max_perf_pct = 100;
2049
2050 intel_pstate_driver = driver;
2051 ret = cpufreq_register_driver(intel_pstate_driver);
2052 if (ret) {
2053 intel_pstate_driver_cleanup();
2054 return ret;
2055 }
2056
2057 global.min_perf_pct = min_perf_pct_min();
2058
2059 return 0;
2060}
2061
2062static int intel_pstate_unregister_driver(void)
2063{
2064 if (hwp_active)
2065 return -EBUSY;
2066
2067 cpufreq_unregister_driver(intel_pstate_driver);
2068 intel_pstate_driver_cleanup();
2069
2070 return 0;
2071}
2072
2073static ssize_t intel_pstate_show_status(char *buf)
2074{
2075 if (!intel_pstate_driver)
2076 return sprintf(buf, "off\n");
2077
2078 return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
2079 "active" : "passive");
2080}
2081
2082static int intel_pstate_update_status(const char *buf, size_t size)
2083{
2084 int ret;
2085
2086 if (size == 3 && !strncmp(buf, "off", size))
2087 return intel_pstate_driver ?
2088 intel_pstate_unregister_driver() : -EINVAL;
2089
2090 if (size == 6 && !strncmp(buf, "active", size)) {
2091 if (intel_pstate_driver) {
2092 if (intel_pstate_driver == &intel_pstate)
2093 return 0;
2094
2095 ret = intel_pstate_unregister_driver();
2096 if (ret)
2097 return ret;
2098 }
2099
2100 return intel_pstate_register_driver(&intel_pstate);
2101 }
2102
2103 if (size == 7 && !strncmp(buf, "passive", size)) {
2104 if (intel_pstate_driver) {
2105 if (intel_pstate_driver == &intel_cpufreq)
2106 return 0;
2107
2108 ret = intel_pstate_unregister_driver();
2109 if (ret)
2110 return ret;
2111 }
2112
2113 return intel_pstate_register_driver(&intel_cpufreq);
2114 }
2115
2116 return -EINVAL;
2117}
2118
2119static int no_load __initdata;
2120static int no_hwp __initdata;
2121static int hwp_only __initdata;
2122static unsigned int force_load __initdata;
2123
2124static int __init intel_pstate_msrs_not_valid(void)
2125{
2126 if (!pstate_funcs.get_max() ||
2127 !pstate_funcs.get_min() ||
2128 !pstate_funcs.get_turbo())
2129 return -ENODEV;
2130
2131 return 0;
2132}
2133
2134static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
2135{
2136 pstate_funcs.get_max = funcs->get_max;
2137 pstate_funcs.get_max_physical = funcs->get_max_physical;
2138 pstate_funcs.get_min = funcs->get_min;
2139 pstate_funcs.get_turbo = funcs->get_turbo;
2140 pstate_funcs.get_scaling = funcs->get_scaling;
2141 pstate_funcs.get_val = funcs->get_val;
2142 pstate_funcs.get_vid = funcs->get_vid;
2143 pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
2144}
2145
2146#ifdef CONFIG_ACPI
2147
2148static bool __init intel_pstate_no_acpi_pss(void)
2149{
2150 int i;
2151
2152 for_each_possible_cpu(i) {
2153 acpi_status status;
2154 union acpi_object *pss;
2155 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
2156 struct acpi_processor *pr = per_cpu(processors, i);
2157
2158 if (!pr)
2159 continue;
2160
2161 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
2162 if (ACPI_FAILURE(status))
2163 continue;
2164
2165 pss = buffer.pointer;
2166 if (pss && pss->type == ACPI_TYPE_PACKAGE) {
2167 kfree(pss);
2168 return false;
2169 }
2170
2171 kfree(pss);
2172 }
2173
2174 return true;
2175}
2176
2177static bool __init intel_pstate_has_acpi_ppc(void)
2178{
2179 int i;
2180
2181 for_each_possible_cpu(i) {
2182 struct acpi_processor *pr = per_cpu(processors, i);
2183
2184 if (!pr)
2185 continue;
2186 if (acpi_has_method(pr->handle, "_PPC"))
2187 return true;
2188 }
2189 return false;
2190}
2191
2192enum {
2193 PSS,
2194 PPC,
2195};
2196
/* Hardware vendor-specific info that has its own power management modes */
2198static struct acpi_platform_list plat_info[] __initdata = {
2199 {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
2200 {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2201 {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2202 {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2203 {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2204 {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2205 {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2206 {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2207 {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2208 {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2209 {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2210 {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2211 {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2212 {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2213 {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
2214 { }
2215};
2216
2217static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
2218{
2219 const struct x86_cpu_id *id;
2220 u64 misc_pwr;
2221 int idx;
2222
2223 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
2224 if (id) {
2225 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
2226 if ( misc_pwr & (1 << 8))
2227 return true;
2228 }
2229
2230 idx = acpi_match_platform_list(plat_info);
2231 if (idx < 0)
2232 return false;
2233
2234 switch (plat_info[idx].data) {
2235 case PSS:
2236 return intel_pstate_no_acpi_pss();
2237 case PPC:
2238 return intel_pstate_has_acpi_ppc() && !force_load;
2239 }
2240
2241 return false;
2242}
2243
2244static void intel_pstate_request_control_from_smm(void)
2245{
 /*
  * It may be unsafe to request P-states control from SMM if _PPC support
  * has not been enabled.
  */
2250 if (acpi_ppc)
2251 acpi_processor_pstate_control();
2252}
2253#else
2254static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
2255static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
2256static inline void intel_pstate_request_control_from_smm(void) {}
2257#endif
2258
2259static const struct x86_cpu_id hwp_support_ids[] __initconst = {
2260 { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
2261 {}
2262};
2263
2264static int __init intel_pstate_init(void)
2265{
2266 int rc;
2267
2268 if (no_load)
2269 return -ENODEV;
2270
2271 if (x86_match_cpu(hwp_support_ids)) {
2272 copy_cpu_funcs(&core_funcs);
2273 if (!no_hwp) {
2274 hwp_active++;
2275 intel_pstate.attr = hwp_cpufreq_attrs;
2276 goto hwp_cpu_matched;
2277 }
2278 } else {
2279 const struct x86_cpu_id *id;
2280
2281 id = x86_match_cpu(intel_pstate_cpu_ids);
2282 if (!id)
2283 return -ENODEV;
2284
2285 copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
2286 }
2287
2288 if (intel_pstate_msrs_not_valid())
2289 return -ENODEV;
2290
2291hwp_cpu_matched:
 /*
  * The Intel pstate driver will be ignored if the platform
  * firmware has its own power management modes.
  */
2296 if (intel_pstate_platform_pwr_mgmt_exists())
2297 return -ENODEV;
2298
2299 if (!hwp_active && hwp_only)
2300 return -ENOTSUPP;
2301
2302 pr_info("Intel P-state driver initializing\n");
2303
2304 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
2305 if (!all_cpu_data)
2306 return -ENOMEM;
2307
2308 intel_pstate_request_control_from_smm();
2309
2310 intel_pstate_sysfs_expose_params();
2311
2312 mutex_lock(&intel_pstate_driver_lock);
2313 rc = intel_pstate_register_driver(default_driver);
2314 mutex_unlock(&intel_pstate_driver_lock);
2315 if (rc)
2316 return rc;
2317
2318 if (hwp_active)
2319 pr_info("HWP enabled\n");
2320
2321 return 0;
2322}
2323device_initcall(intel_pstate_init);
2324
2325static int __init intel_pstate_setup(char *str)
2326{
2327 if (!str)
2328 return -EINVAL;
2329
2330 if (!strcmp(str, "disable")) {
2331 no_load = 1;
2332 } else if (!strcmp(str, "passive")) {
2333 pr_info("Passive mode enabled\n");
2334 default_driver = &intel_cpufreq;
2335 no_hwp = 1;
2336 }
2337 if (!strcmp(str, "no_hwp")) {
2338 pr_info("HWP disabled\n");
2339 no_hwp = 1;
2340 }
2341 if (!strcmp(str, "force"))
2342 force_load = 1;
2343 if (!strcmp(str, "hwp_only"))
2344 hwp_only = 1;
2345 if (!strcmp(str, "per_cpu_perf_limits"))
2346 per_cpu_limits = true;
2347
2348#ifdef CONFIG_ACPI
2349 if (!strcmp(str, "support_acpi_ppc"))
2350 acpi_ppc = true;
2351#endif
2352
2353 return 0;
2354}
2355early_param("intel_pstate", intel_pstate_setup);
2356
2357MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
2359MODULE_LICENSE("GPL");
2360