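/*
 * POWERNV cpufreq driver for the IBM POWER processors
 *
 * (C) Copyright IBM 2014
 *
 * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
 */
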
#define pr_fmt(fmt)	"powernv-cpufreq: " fmt

#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
#include <trace/events/power.h>

#include <asm/cputhreads.h>
#include <asm/firmware.h>
#include <asm/reg.h>
#include <asm/smp.h>
#include <asm/opal.h>
#include <linux/timer.h>

#define POWERNV_MAX_PSTATES_ORDER	8
#define POWERNV_MAX_PSTATES		(1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE		(1UL << 30)
#define PMSR_SPR_EM_DISABLE		(1UL << 31)
#define MAX_PSTATE_SHIFT		32
#define LPSTATE_SHIFT			48
#define GPSTATE_SHIFT			56

#define MAX_RAMP_DOWN_TIME		5120
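/*
 * On an idle system we want the global pstate to ramp down from its highest
 * value to the minimum over a span of ~5 seconds (MAX_RAMP_DOWN_TIME ms),
 * slowly at first and faster later on. The quadratic expression below yields
 * the percentage of the ramp-down completed after 'time' ms:
 *
 *	ramp_down_percent(time) = (time * time) >> 18
 *
 * Since (5120 * 5120) >> 18 == 100, the ramp-down reaches 100% exactly at
 * MAX_RAMP_DOWN_TIME.
 */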
#define ramp_down_percent(time)		((time * time) >> 18)
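/* Interval (ms) after which the timer is queued to bring down the global pstate */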
#define GPSTATE_TIMER_INTERVAL		2000
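/**
 * struct global_pstate_info - Per-policy data to track the global pstate
 *			       ramp-down history
 * @highest_lpstate_idx:	The local pstate index from which we are
 *				ramping down
 * @elapsed_time:		Time in ms spent ramping down from
 *				highest_lpstate_idx
 * @last_sampled_time:		Time from boot in ms when global pstates were
 *				last set
 * @last_lpstate_idx:		Last set value of the local pstate, as a
 *				cpufreq table index
 * @last_gpstate_idx:		Last set value of the global pstate, as a
 *				cpufreq table index
 * @gpstate_lock:		Synchronizes the timer handler with the
 *				governor's target_index calls
 * @timer:			Ramps down the global pstate if the CPU goes
 *				idle for a long time with the global pstate
 *				held high
 * @policy:			Associated cpufreq policy
 */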
struct global_pstate_info {
	int highest_lpstate_idx;
	unsigned int elapsed_time;
	unsigned int last_sampled_time;
	int last_lpstate_idx;
	int last_gpstate_idx;
	spinlock_t gpstate_lock;
	struct timer_list timer;
	struct cpufreq_policy *policy;
};

static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];

DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
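/**
 * struct pstate_idx_revmap_data - Entry in the hashmap pstate_revmap,
 *				   indexed by a function of the pstate id
 * @pstate_id:		pstate id for this entry
 * @cpufreq_table_idx:	Index into powernv_freqs for the frequency
 *			corresponding to @pstate_id
 * @hentry:		hlist hash entry
 *
 * The hashtable provides a quick reverse lookup from a pstate id to the
 * corresponding cpufreq table index.
 */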
struct pstate_idx_revmap_data {
	u8 pstate_id;
	unsigned int cpufreq_table_idx;
	struct hlist_node hentry;
};

static bool rebooting, throttled, occ_reset;

static const char * const throttle_reason[] = {
	"No throttling",
	"Power Cap",
	"Processor Over Temperature",
	"Power Supply Failure",
	"Over Current",
	"OCC Reset"
};

enum throttle_reason_type {
	NO_THROTTLE = 0,
	POWERCAP,
	CPU_OVERTEMP,
	POWER_SUPPLY_FAILURE,
	OVERCURRENT,
	OCC_RESET_THROTTLE,
	OCC_MAX_REASON
};

static struct chip {
	unsigned int id;
	bool throttled;
	bool restore;
	u8 throttle_reason;
	cpumask_t mask;
	struct work_struct throttle;
	int throttle_turbo;
	int throttle_sub_turbo;
	int reason[OCC_MAX_REASON];
} *chips;

static int nr_chips;
static DEFINE_PER_CPU(struct chip *, chip_info);
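/*
 * powernv_pstate_info stores the cpufreq table indices of the min, max and
 * nominal frequencies, along with the number of available frequencies.
 *
 * powernv_pstate_info.nominal is the index of the highest non-turbo
 * frequency.
 */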
static struct powernv_pstate_info {
	unsigned int min;
	unsigned int max;
	unsigned int nominal;
	unsigned int nr_pstates;
	bool wof_enabled;
} powernv_pstate_info;

static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
{
	return ((pmsr_val >> shift) & 0xFF);
}

#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
#define extract_max_pstate(x)  extract_pstate(x, MAX_PSTATE_SHIFT)
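/* Use the following functions for conversions between pstate_id and index */

/*
 * idx_to_pstate() - Return the pstate id corresponding to the frequency in
 *		     the cpufreq table powernv_freqs indexed by @i.
 *
 * If @i is out of bounds, return the pstate corresponding to the nominal
 * frequency.
 */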
static inline u8 idx_to_pstate(unsigned int i)
{
	if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
		pr_warn_once("idx_to_pstate: index %u is out of bounds\n", i);
		return powernv_freqs[powernv_pstate_info.nominal].driver_data;
	}

	return powernv_freqs[i].driver_data;
}
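/*
 * pstate_to_idx() - Return the index in the cpufreq table powernv_freqs of
 *		     the frequency whose pstate id is @pstate.
 *
 * If no frequency corresponding to @pstate is found, return the index of
 * the nominal frequency.
 */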
static unsigned int pstate_to_idx(u8 pstate)
{
	unsigned int key = pstate % POWERNV_MAX_PSTATES;
	struct pstate_idx_revmap_data *revmap_data;

	hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
		if (revmap_data->pstate_id == pstate)
			return revmap_data->cpufreq_table_idx;
	}

	pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
	return powernv_pstate_info.nominal;
}

static inline void reset_gpstates(struct cpufreq_policy *policy)
{
	struct global_pstate_info *gpstates = policy->driver_data;

	gpstates->highest_lpstate_idx = 0;
	gpstates->elapsed_time = 0;
	gpstates->last_sampled_time = 0;
	gpstates->last_lpstate_idx = 0;
	gpstates->last_gpstate_idx = 0;
}
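/*
 * init_powernv_pstates() - Initialize the frequency table from the pstate
 * data exported by the firmware through the device tree.
 */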
static int init_powernv_pstates(void)
{
	struct device_node *power_mgt;
	int i, nr_pstates = 0;
	const __be32 *pstate_ids, *pstate_freqs;
	u32 len_ids, len_freqs;
	u32 pstate_min, pstate_max, pstate_nominal;
	u32 pstate_turbo, pstate_ultra_turbo;

	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!power_mgt) {
		pr_warn("power-mgt node not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
		pr_warn("ibm,pstate-min node not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
		pr_warn("ibm,pstate-max node not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
				 &pstate_nominal)) {
		pr_warn("ibm,pstate-nominal not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
				 &pstate_ultra_turbo)) {
		powernv_pstate_info.wof_enabled = false;
		goto next;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
				 &pstate_turbo)) {
		powernv_pstate_info.wof_enabled = false;
		goto next;
	}

	if (pstate_turbo == pstate_ultra_turbo)
		powernv_pstate_info.wof_enabled = false;
	else
		powernv_pstate_info.wof_enabled = true;

next:
	pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
		pstate_nominal, pstate_max);
	pr_info("Workload Optimized Frequency is %s in the platform\n",
		(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");

	pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
	if (!pstate_ids) {
		pr_warn("ibm,pstate-ids not found\n");
		return -ENODEV;
	}

	pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
				       &len_freqs);
	if (!pstate_freqs) {
		pr_warn("ibm,pstate-frequencies-mhz not found\n");
		return -ENODEV;
	}

	if (len_ids != len_freqs) {
		pr_warn("Entries in ibm,pstate-ids and ibm,pstate-frequencies-mhz do not match\n");
	}

	nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
	if (!nr_pstates) {
		pr_warn("No PStates found\n");
		return -ENODEV;
	}

	powernv_pstate_info.nr_pstates = nr_pstates;
	pr_debug("NR PStates %d\n", nr_pstates);

	for (i = 0; i < nr_pstates; i++) {
		u32 id = be32_to_cpu(pstate_ids[i]);
		u32 freq = be32_to_cpu(pstate_freqs[i]);
		struct pstate_idx_revmap_data *revmap_data;
		unsigned int key;

		pr_debug("PState id %d freq %d MHz\n", id, freq);
		powernv_freqs[i].frequency = freq * 1000;
		powernv_freqs[i].driver_data = id & 0xFF;

		revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
		if (!revmap_data)
			return -ENOMEM;

		revmap_data->pstate_id = id & 0xFF;
		revmap_data->cpufreq_table_idx = i;
		key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES;
		hash_add(pstate_revmap, &revmap_data->hentry, key);

		if (id == pstate_max)
			powernv_pstate_info.max = i;
		if (id == pstate_nominal)
			powernv_pstate_info.nominal = i;
		if (id == pstate_min)
			powernv_pstate_info.min = i;

		if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
			int j;

			for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
				powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
		}
	}

	/* End of list marker entry */
	powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
	return 0;
}
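/* Returns the CPU frequency corresponding to @pstate_id. */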
static unsigned int pstate_id_to_freq(u8 pstate_id)
{
	int i;

	i = pstate_to_idx(pstate_id);
	if (i >= powernv_pstate_info.nr_pstates || i < 0) {
		pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
			pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
		i = powernv_pstate_info.nominal;
	}

	return powernv_freqs[i].frequency;
}
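/*
 * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
 * the firmware
 */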
static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
					 char *buf)
{
	return sprintf(buf, "%u\n",
		       powernv_freqs[powernv_pstate_info.nominal].frequency);
}

struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
	__ATTR_RO(cpuinfo_nominal_freq);

#define SCALING_BOOST_FREQS_ATTR_INDEX	2

static struct freq_attr *powernv_cpu_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_cpuinfo_nominal_freq,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL,
};

#define throttle_attr(name, member)					\
static ssize_t name##_show(struct cpufreq_policy *policy, char *buf)	\
{									\
	struct chip *chip = per_cpu(chip_info, policy->cpu);		\
									\
	return sprintf(buf, "%u\n", chip->member);			\
}									\
									\
static struct freq_attr throttle_attr_##name = __ATTR_RO(name)		\

throttle_attr(unthrottle, reason[NO_THROTTLE]);
throttle_attr(powercap, reason[POWERCAP]);
throttle_attr(overtemp, reason[CPU_OVERTEMP]);
throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
throttle_attr(overcurrent, reason[OVERCURRENT]);
throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
throttle_attr(turbo_stat, throttle_turbo);
throttle_attr(sub_turbo_stat, throttle_sub_turbo);

static struct attribute *throttle_attrs[] = {
	&throttle_attr_unthrottle.attr,
	&throttle_attr_powercap.attr,
	&throttle_attr_overtemp.attr,
	&throttle_attr_supply_fault.attr,
	&throttle_attr_overcurrent.attr,
	&throttle_attr_occ_reset.attr,
	&throttle_attr_turbo_stat.attr,
	&throttle_attr_sub_turbo_stat.attr,
	NULL,
};

static const struct attribute_group throttle_attr_grp = {
	.name	= "throttle_stats",
	.attrs	= throttle_attrs,
};
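/* Access helpers for the power management SPRs */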
static inline unsigned long get_pmspr(unsigned long sprn)
{
	switch (sprn) {
	case SPRN_PMCR:
		return mfspr(SPRN_PMCR);

	case SPRN_PMICR:
		return mfspr(SPRN_PMICR);

	case SPRN_PMSR:
		return mfspr(SPRN_PMSR);
	}
	BUG();
}

static inline void set_pmspr(unsigned long sprn, unsigned long val)
{
	switch (sprn) {
	case SPRN_PMCR:
		mtspr(SPRN_PMCR, val);
		return;

	case SPRN_PMICR:
		mtspr(SPRN_PMICR, val);
		return;
	}
	BUG();
}
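/*
 * Use objects of this type to query/update the pstate of a remote CPU via
 * smp_call_function.
 */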
struct powernv_smp_call_data {
	unsigned int freq;
	u8 pstate_id;
	u8 gpstate_id;
};
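/*
 * powernv_read_cpu_freq() - Read the current frequency on this CPU.
 *
 * Called via smp_call_function.
 *
 * The caller passes an argument of type 'struct powernv_smp_call_data *',
 * which is updated with the current frequency of this CPU. The value is
 * only valid until the caller of smp_call_function returns.
 */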
static void powernv_read_cpu_freq(void *arg)
{
	unsigned long pmspr_val;
	struct powernv_smp_call_data *freq_data = arg;

	pmspr_val = get_pmspr(SPRN_PMSR);
	freq_data->pstate_id = extract_local_pstate(pmspr_val);
	freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);

	pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
		 raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
		 freq_data->freq);
}
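/*
 * powernv_cpufreq_get() - Return the CPU frequency as reported by the
 * firmware for 'cpu'. This value is exposed through the sysfs file
 * cpuinfo_cur_freq.
 */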
static unsigned int powernv_cpufreq_get(unsigned int cpu)
{
	struct powernv_smp_call_data freq_data;

	smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
			      &freq_data, 1);

	return freq_data.freq;
}
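/*
 * set_pstate() - Set the pstate on this CPU.
 *
 * Called via smp_call_function. The caller must pass a
 * 'struct powernv_smp_call_data *' populated with the pstate_id and
 * gpstate_id to be set.
 */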
static void set_pstate(void *data)
{
	unsigned long val;
	struct powernv_smp_call_data *freq_data = data;
	unsigned long pstate_ul = freq_data->pstate_id;
	unsigned long gpstate_ul = freq_data->gpstate_id;

	val = get_pmspr(SPRN_PMCR);
	val = val & 0x0000FFFFFFFFFFFFULL;

	pstate_ul = pstate_ul & 0xFF;
	gpstate_ul = gpstate_ul & 0xFF;

	/* Set both global (bits 56..63) and local (bits 48..55) PStates */
	val = val | (gpstate_ul << 56) | (pstate_ul << 48);

	pr_debug("Setting cpu %d pmcr to %016lX\n",
		 raw_smp_processor_id(), val);
	set_pmspr(SPRN_PMCR, val);
}
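/*
 * get_nominal_index() - Return the cpufreq table index corresponding to the
 * nominal pstate.
 */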
static inline unsigned int get_nominal_index(void)
{
	return powernv_pstate_info.nominal;
}

static void powernv_cpufreq_throttle_check(void *data)
{
	struct chip *chip;
	unsigned int cpu = smp_processor_id();
	unsigned long pmsr;
	u8 pmsr_pmax;
	unsigned int pmsr_pmax_idx;

	pmsr = get_pmspr(SPRN_PMSR);
	chip = this_cpu_read(chip_info);

	/* Check for Pmax Capping */
	pmsr_pmax = extract_max_pstate(pmsr);
	pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
	if (pmsr_pmax_idx != powernv_pstate_info.max) {
		if (chip->throttled)
			goto next;
		chip->throttled = true;
		if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
			pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below nominal frequency(0x%x)\n",
				     cpu, chip->id, pmsr_pmax,
				     idx_to_pstate(powernv_pstate_info.nominal));
			chip->throttle_sub_turbo++;
		} else {
			chip->throttle_turbo++;
		}
		trace_powernv_throttle(chip->id,
				       throttle_reason[chip->throttle_reason],
				       pmsr_pmax);
	} else if (chip->throttled) {
		chip->throttled = false;
		trace_powernv_throttle(chip->id,
				       throttle_reason[chip->throttle_reason],
				       pmsr_pmax);
	}

	/* Check if Psafe_mode_active is set in PMSR */
next:
	if (pmsr & PMSR_PSAFE_ENABLE) {
		throttled = true;
		pr_info("Pstate set to safe frequency\n");
	}

	/* Check if SPR_EM_DISABLE is set in PMSR */
	if (pmsr & PMSR_SPR_EM_DISABLE) {
		throttled = true;
		pr_info("Frequency Control disabled from OS\n");
	}

	if (throttled) {
		pr_info("PMSR = %16lx\n", pmsr);
		pr_warn("CPU Frequency could be throttled\n");
	}
}
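/**
 * calc_global_pstate() - Calculate the global pstate
 * @elapsed_time:		Elapsed time in milliseconds
 * @highest_lpstate_idx:	pstate index from which we are ramping down
 * @local_pstate_idx:		New local pstate
 *
 * Finds the appropriate global pstate based on the pstate from which we are
 * ramping down and the percentage of time elapsed in the ramp-down window.
 *
 * Return: The global pstate index.
 */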
static inline int calc_global_pstate(unsigned int elapsed_time,
				     int highest_lpstate_idx,
				     int local_pstate_idx)
{
	int index_diff;

	/*
	 * ramp_down_percent() gives the expected percentage of ramp-down
	 * completed so far. The difference between highest_lpstate_idx and
	 * powernv_pstate_info.min is the total number of index steps to drop
	 * by the end of the ramp-down window, so scale it by that
	 * percentage. Note that cpufreq table indices, not raw pstate ids,
	 * are used here.
	 */
	index_diff = ((int)ramp_down_percent(elapsed_time) *
		      (powernv_pstate_info.min - highest_lpstate_idx)) / 100;

	/* Ensure that the global pstate is >= the local pstate */
	if (highest_lpstate_idx + index_diff >= local_pstate_idx)
		return local_pstate_idx;
	else
		return highest_lpstate_idx + index_diff;
}

static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
{
	unsigned int timer_interval;

	/*
	 * Normally fire the timer after GPSTATE_TIMER_INTERVAL ms. If that
	 * would overshoot the MAX_RAMP_DOWN_TIME ms ramp-down window, set
	 * the timer to fire exactly at the end of the window instead.
	 */
	if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
	     > MAX_RAMP_DOWN_TIME)
		timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
	else
		timer_interval = GPSTATE_TIMER_INTERVAL;

	mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
}
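/**
 * gpstate_timer_handler() - Ramp down the global pstate
 * @t: Timer context used to fetch the global pstate info struct
 *
 * Brings the global pstate closer to the local pstate according to the
 * quadratic ramp-down equation, and queues a new timer if the two are still
 * not equal.
 */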
void gpstate_timer_handler(struct timer_list *t)
{
	struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
	struct cpufreq_policy *policy = gpstates->policy;
	int gpstate_idx, lpstate_idx;
	unsigned long val;
	unsigned int time_diff = jiffies_to_msecs(jiffies)
				 - gpstates->last_sampled_time;
	struct powernv_smp_call_data freq_data;

	if (!spin_trylock(&gpstates->gpstate_lock))
		return;

	/*
	 * If the timer has migrated to a different cpu then bring
	 * it back to one of the policy->cpus.
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
		gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
		add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
		spin_unlock(&gpstates->gpstate_lock);
		return;
	}

	/*
	 * If PMCR was last updated via fast_switch(), the cached
	 * gpstates->last_lpstate_idx may be stale. Hence, read the current
	 * local and global pstates from the PMCR itself.
	 */
	val = get_pmspr(SPRN_PMCR);
	freq_data.gpstate_id = extract_global_pstate(val);
	freq_data.pstate_id = extract_local_pstate(val);
	if (freq_data.gpstate_id == freq_data.pstate_id) {
		reset_gpstates(policy);
		spin_unlock(&gpstates->gpstate_lock);
		return;
	}

	gpstates->last_sampled_time += time_diff;
	gpstates->elapsed_time += time_diff;

	if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
		gpstate_idx = pstate_to_idx(freq_data.pstate_id);
		lpstate_idx = gpstate_idx;
		reset_gpstates(policy);
		gpstates->highest_lpstate_idx = gpstate_idx;
	} else {
		lpstate_idx = pstate_to_idx(freq_data.pstate_id);
		gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
						 gpstates->highest_lpstate_idx,
						 lpstate_idx);
	}
	freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
	gpstates->last_gpstate_idx = gpstate_idx;
	gpstates->last_lpstate_idx = lpstate_idx;

	/*
	 * If the global pstate has reached the local pstate, the ramp-down
	 * is over and the timer need not be queued again.
	 */
	if (gpstate_idx != gpstates->last_lpstate_idx)
		queue_gpstate_timer(gpstates);

	set_pstate(&freq_data);
	spin_unlock(&gpstates->gpstate_lock);
}
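/*
 * powernv_cpufreq_target_index() - Set the frequency corresponding to the
 * cpufreq table entry indexed by @new_index on the CPUs in the mask
 * policy->cpus.
 */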
static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
					unsigned int new_index)
{
	struct powernv_smp_call_data freq_data;
	unsigned int cur_msec, gpstate_idx;
	struct global_pstate_info *gpstates = policy->driver_data;

	if (unlikely(rebooting) && new_index != get_nominal_index())
		return 0;

	if (!throttled) {
		/*
		 * We don't want to be preempted while checking if the CPU
		 * frequency has been throttled.
		 */
		preempt_disable();
		powernv_cpufreq_throttle_check(NULL);
		preempt_enable();
	}

	cur_msec = jiffies_to_msecs(get_jiffies_64());

	spin_lock(&gpstates->gpstate_lock);
	freq_data.pstate_id = idx_to_pstate(new_index);

	if (!gpstates->last_sampled_time) {
		gpstate_idx = new_index;
		gpstates->highest_lpstate_idx = new_index;
		goto gpstates_done;
	}

	if (gpstates->last_gpstate_idx < new_index) {
		gpstates->elapsed_time += cur_msec -
						 gpstates->last_sampled_time;

		/*
		 * If we have been ramping down for more than
		 * MAX_RAMP_DOWN_TIME, reset all global pstate related data
		 * and start fresh from the local pstate.
		 */
		if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
			reset_gpstates(policy);
			gpstates->highest_lpstate_idx = new_index;
			gpstate_idx = new_index;
		} else {
		/* Elapsed time is less than 5 seconds, continue to ramp down */
			gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
							 gpstates->highest_lpstate_idx,
							 new_index);
		}
	} else {
		reset_gpstates(policy);
		gpstates->highest_lpstate_idx = new_index;
		gpstate_idx = new_index;
	}

	/*
	 * If the global pstate equals the local pstate, the ramp-down is
	 * over, so the timer is not required to be queued; delete it if it
	 * is already pending.
	 */
	if (gpstate_idx != new_index)
		queue_gpstate_timer(gpstates);
	else
		del_timer_sync(&gpstates->timer);

gpstates_done:
	freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
	gpstates->last_sampled_time = cur_msec;
	gpstates->last_gpstate_idx = gpstate_idx;
	gpstates->last_lpstate_idx = new_index;

	spin_unlock(&gpstates->gpstate_lock);

	/*
	 * Use smp_call_function to send an IPI and execute the mtspr on the
	 * target CPU. We could do this without an IPI if the current CPU is
	 * within policy->cpus (same core).
	 */
	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
	return 0;
}

static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int base, i;
	struct kernfs_node *kn;
	struct global_pstate_info *gpstates;

	base = cpu_first_thread_sibling(policy->cpu);

	for (i = 0; i < threads_per_core; i++)
		cpumask_set_cpu(base + i, policy->cpus);

	kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
	if (!kn) {
		int ret;

		ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
		if (ret) {
			pr_info("Failed to create throttle stats directory for cpu %d\n",
				policy->cpu);
			return ret;
		}
	} else {
		kernfs_put(kn);
	}

	gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
	if (!gpstates)
		return -ENOMEM;

	policy->driver_data = gpstates;

	/* initialize timer */
	gpstates->policy = policy;
	timer_setup(&gpstates->timer, gpstate_timer_handler,
		    TIMER_PINNED | TIMER_DEFERRABLE);
	gpstates->timer.expires = jiffies +
				  msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
	spin_lock_init(&gpstates->gpstate_lock);

	policy->freq_table = powernv_freqs;
	policy->fast_switch_possible = true;
	return 0;
}

static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	/* timer is deleted in stop_cpu() */
	kfree(policy->driver_data);

	return 0;
}

static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
					   unsigned long action, void *unused)
{
	int cpu;
	struct cpufreq_policy cpu_policy;

	rebooting = true;
	for_each_online_cpu(cpu) {
		cpufreq_get_policy(&cpu_policy, cpu);
		powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
	}

	return NOTIFY_DONE;
}

static struct notifier_block powernv_cpufreq_reboot_nb = {
	.notifier_call = powernv_cpufreq_reboot_notifier,
};

void powernv_cpufreq_work_fn(struct work_struct *work)
{
	struct chip *chip = container_of(work, struct chip, throttle);
	unsigned int cpu;
	cpumask_t mask;

	get_online_cpus();
	cpumask_and(&mask, &chip->mask, cpu_online_mask);
	smp_call_function_any(&mask,
			      powernv_cpufreq_throttle_check, NULL, 0);

	if (!chip->restore)
		goto out;

	chip->restore = false;
	for_each_cpu(cpu, &mask) {
		int index;
		struct cpufreq_policy policy;

		cpufreq_get_policy(&policy, cpu);
		index = cpufreq_table_find_index_c(&policy, policy.cur);
		powernv_cpufreq_target_index(&policy, index);
		cpumask_andnot(&mask, &mask, policy.cpus);
	}
out:
	put_online_cpus();
}

static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
				   unsigned long msg_type, void *_msg)
{
	struct opal_msg *msg = _msg;
	struct opal_occ_msg omsg;
	int i;

	if (msg_type != OPAL_MSG_OCC)
		return 0;

	omsg.type = be64_to_cpu(msg->params[0]);

	switch (omsg.type) {
	case OCC_RESET:
		occ_reset = true;
		pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
		/*
		 * powernv_cpufreq_throttle_check() is called in the target()
		 * callback, which can detect the throttle state for
		 * governors like ondemand. Static governors will not call
		 * target() often, so report throttling here instead.
		 */
		if (!throttled) {
			throttled = true;
			pr_warn("CPU frequency is throttled for the duration of the OCC reset\n");
		}

		break;
	case OCC_LOAD:
		pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
		break;
	case OCC_THROTTLE:
		omsg.chip = be64_to_cpu(msg->params[1]);
		omsg.throttle_status = be64_to_cpu(msg->params[2]);

		if (occ_reset) {
			occ_reset = false;
			throttled = false;
			pr_info("OCC Active, CPU frequency is no longer throttled\n");

			for (i = 0; i < nr_chips; i++) {
				chips[i].restore = true;
				schedule_work(&chips[i].throttle);
			}

			return 0;
		}

		for (i = 0; i < nr_chips; i++)
			if (chips[i].id == omsg.chip)
				break;

		if (omsg.throttle_status >= 0 &&
		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
			chips[i].throttle_reason = omsg.throttle_status;
			chips[i].reason[omsg.throttle_status]++;
		}

		if (!omsg.throttle_status)
			chips[i].restore = true;

		schedule_work(&chips[i].throttle);
	}
	return 0;
}

static struct notifier_block powernv_cpufreq_opal_nb = {
	.notifier_call	= powernv_cpufreq_occ_msg,
	.next		= NULL,
	.priority	= 0,
};

static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	struct powernv_smp_call_data freq_data;
	struct global_pstate_info *gpstates = policy->driver_data;

	freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
	freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
	smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
	del_timer_sync(&gpstates->timer);
}

static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	int index;
	struct powernv_smp_call_data freq_data;

	index = cpufreq_table_find_index_dl(policy, target_freq);
	freq_data.pstate_id = powernv_freqs[index].driver_data;
	freq_data.gpstate_id = powernv_freqs[index].driver_data;
	set_pstate(&freq_data);

	return powernv_freqs[index].frequency;
}

static struct cpufreq_driver powernv_cpufreq_driver = {
	.name		= "powernv-cpufreq",
	.flags		= CPUFREQ_CONST_LOOPS,
	.init		= powernv_cpufreq_cpu_init,
	.exit		= powernv_cpufreq_cpu_exit,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= powernv_cpufreq_target_index,
	.fast_switch	= powernv_fast_switch,
	.get		= powernv_cpufreq_get,
	.stop_cpu	= powernv_cpufreq_stop_cpu,
	.attr		= powernv_cpu_freq_attr,
};

static int init_chip_info(void)
{
	unsigned int chip[256];
	unsigned int cpu, i;
	unsigned int prev_chip_id = UINT_MAX;

	for_each_possible_cpu(cpu) {
		unsigned int id = cpu_to_chip_id(cpu);

		if (prev_chip_id != id) {
			prev_chip_id = id;
			chip[nr_chips++] = id;
		}
	}

	chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
	if (!chips)
		return -ENOMEM;

	for (i = 0; i < nr_chips; i++) {
		chips[i].id = chip[i];
		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
		for_each_cpu(cpu, &chips[i].mask)
			per_cpu(chip_info, cpu) = &chips[i];
	}

	return 0;
}

static inline void clean_chip_info(void)
{
	kfree(chips);
}

static inline void unregister_all_notifiers(void)
{
	opal_message_notifier_unregister(OPAL_MSG_OCC,
					 &powernv_cpufreq_opal_nb);
	unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
}

static int __init powernv_cpufreq_init(void)
{
	int rc = 0;

	/* Don't probe on pseries (guest) platforms */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	/* Discover pstates from the device tree and init */
	rc = init_powernv_pstates();
	if (rc)
		goto out;

	/* Populate chip info */
	rc = init_chip_info();
	if (rc)
		goto out;

	register_reboot_notifier(&powernv_cpufreq_reboot_nb);
	opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);

	if (powernv_pstate_info.wof_enabled)
		powernv_cpufreq_driver.boost_enabled = true;
	else
		powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;

	rc = cpufreq_register_driver(&powernv_cpufreq_driver);
	if (rc) {
		pr_info("Failed to register the cpufreq driver (%d)\n", rc);
		goto cleanup_notifiers;
	}

	if (powernv_pstate_info.wof_enabled)
		cpufreq_enable_boost_support();

	return 0;
cleanup_notifiers:
	unregister_all_notifiers();
	clean_chip_info();
out:
	pr_info("Platform driver disabled. System does not support PState control\n");
	return rc;
}
module_init(powernv_cpufreq_init);

static void __exit powernv_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&powernv_cpufreq_driver);
	unregister_all_notifiers();
	clean_chip_info();
}
module_exit(powernv_cpufreq_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");