// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
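
/*
 * Usage sketch (editorial addition, not from the original file): an
 * ondemand-style governor can estimate CPU load by sampling idle and wall
 * time twice and comparing the deltas. All identifiers below except
 * get_cpu_idle_time() are hypothetical.
 *
 *	static u64 prev_idle, prev_wall;
 *
 *	static unsigned int sample_load(unsigned int cpu, int io_busy)
 *	{
 *		u64 idle, wall;
 *		unsigned int idle_delta, wall_delta;
 *
 *		idle = get_cpu_idle_time(cpu, &wall, io_busy);
 *		idle_delta = (unsigned int)(idle - prev_idle);
 *		wall_delta = (unsigned int)(wall - prev_wall);
 *		prev_idle = idle;
 *		prev_wall = wall;
 *
 *		if (!wall_delta || wall_delta < idle_delta)
 *			return 0;
 *
 *		// busy percentage over the sampling window
 *		return 100 * (wall_delta - idle_delta) / wall_delta;
 *	}
 */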

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do following:
 * - validate & show freq table passed
 * - set policies transition latency
 * - policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage and clock.  Use cpumask_setall() here
	 * to signal it.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
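
/*
 * Usage sketch (editorial addition): a minimal clk-based driver's ->init()
 * might pair cpufreq_generic_get() with cpufreq_generic_init(). The table,
 * clock handle and latency value below are hypothetical.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_cpu_clk;	// struct clk *, assumed to exist
 *		// 100000 ns = 100 us transition latency
 *		cpufreq_generic_init(policy, foo_freq_table, 100000);
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver foo_driver = {
 *		.init	= foo_cpufreq_init,
 *		.get	= cpufreq_generic_get,
 *	};
 */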

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy.  Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
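
/*
 * Usage sketch (editorial addition): every successful cpufreq_cpu_get() must
 * be balanced by a cpufreq_cpu_put(), e.g.:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		unsigned int cur = policy->cur;
 *
 *		cpufreq_cpu_put(policy);
 *	}
 */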

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 *
 * Must be called when the policy returned by cpufreq_cpu_acquire() is no
 * longer needed.
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to drop its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/*
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed access methods. This is necessary for the proper operation of
 * the suspend/resume cycle, as well as for the CPU hotplug support.
 * On SMP systems loops_per_jiffy is handled per CPU, so nothing needs
 * to be done here.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
						ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
 * @policy: cpufreq policy the transition applies to.
 * @freqs: contain details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and adjust_jiffies().
 *
 * It is called twice on all CPU frequency changes that have external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
		&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	arch_set_freq_scale(policy->related_cpus,
			    policy->cur,
			    policy->cpuinfo.max_freq);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
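
/*
 * Usage sketch (editorial addition): for drivers without the
 * CPUFREQ_ASYNC_NOTIFICATION flag these calls are made on their behalf by
 * __target_index() below; a driver that does set the flag wraps its own
 * frequency change like this (foo_write_freq_to_hw() is hypothetical):
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = foo_write_freq_to_hw(target);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */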

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

static unsigned int __resolve_freq(struct cpufreq_policy *policy,
		unsigned int target_freq, unsigned int relation)
{
	unsigned int idx;

	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (!cpufreq_driver->target_index)
		return target_freq;

	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
	policy->cached_resolved_idx = idx;
	policy->cached_target_freq = target_freq;
	return policy->freq_table[idx].frequency;
}

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_L);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
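
/*
 * Usage sketch (editorial addition): a governor that computes a raw target
 * frequency can map it to a driver-supported one first, so that no-op
 * switches can be skipped:
 *
 *	unsigned int next = cpufreq_driver_resolve_freq(policy, raw_freq);
 *
 *	if (next != policy->cur)
 *		__cpufreq_driver_target(policy, next, CPUFREQ_RELATION_L);
 */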

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), the above formula gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(str_governor);
	if (!t)
		goto unlock;

	if (!try_module_get(t->owner))
		t = NULL;

unlock:
	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/*
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	t = get_governor(str_governor);
	if (t)
		return t;

	if (request_module("cpufreq_%s", str_governor))
		return NULL;

	return get_governor(str_governor);
}

/*
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}
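
/*
 * Expansion example (editorial addition): show_one(scaling_min_freq, min)
 * below generates:
 *
 *	static ssize_t show_scaling_min_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */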

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

/*
 * store_one - update a min/max frequency QoS request from a sysfs write;
 * object must be "min" or "max".
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);	\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver used
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	mutex_lock(&cpufreq_governor_mutex);
	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	if (!fattr->store)
		return -EIO;

	/*
	 * cpus_read_trylock() is used here to work around a circular lock
	 * dependency problem with respect to the cpufreq_register_driver().
	 */
	if (!cpus_read_trylock())
		return -EBUSY;

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	cpus_read_unlock();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
	int ret;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = get_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			gov = get_governor(default_governor);
		}

		if (!gov) {
			gov = cpufreq_default_governor();
			__module_get(gov->owner);
		}

	} else {
		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else {
			pol = cpufreq_parse_policy(default_governor);
			/*
			 * In case the default governor is neither "performance"
			 * nor "powersave", fall back to the initial policy
			 * value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	ret = cpufreq_set_policy(policy, gov, pol);
	if (gov)
		module_put(gov->owner);

	return ret;
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * CPUFREQ_CREATE_POLICY notification is sent only after
		 * successfully adding max_freq_req request.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	if (!new_policy && cpufreq_driver->online) {
		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}

		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->setpolicy for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		/*
		 * The initialization has succeeded and the policy is online.
		 * If there is a problem with its frequency table, take it
		 * offline and drop it.
		 */
		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_offline_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * affected cpus must always be the ones, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req) {
			ret = -ENOMEM;
			goto out_destroy_policy;
		}

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   policy->min);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() on an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on an uninitialized request in case
		 * of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   policy->max);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			ret = -EIO;
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * the frequency table present with cpufreq core. In such cases CPU
	 * might be unstable if it has to run on that frequency for a long
	 * duration of time and so it's better to set it to a frequency which
	 * is specified in the freq-table. This also makes cpufreq stats
	 * inconsistent as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, else we are dealing with multiple invalid frequencies and
	 * cpufreq-core should be happy).
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		unsigned int old_freq = policy->cur;

		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, old_freq);
		if (ret == -EINVAL) {
			ret = __cpufreq_driver_target(policy, old_freq - 1,
						      CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		/*
		 * Register with the energy model before
		 * sched_cpufreq_governor_change() is called, which will result
		 * in rebuilding of the sched domains, which should only be done
		 * once the energy model is properly initialized for the policy
		 * first.
		 *
		 * Also, this should be called before the policy is registered
		 * with cooling framework.
		 */
		if (cpufreq_driver->register_em)
			cpufreq_driver->register_em(policy);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_offline_policy:
	if (cpufreq_driver->offline)
		cpufreq_driver->offline(policy);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus)) {
		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline)
			cpufreq_driver->exit(policy);

		cpufreq_policy_free(policy);
	}
}

/**
 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
 * @policy: Policy managing CPUs.
 * @new_freq: New CPU frequency.
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
				      CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
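
/*
 * Usage sketch (editorial addition): a driver opts into this helper by
 * setting a suspend frequency and using the helper as its ->suspend()
 * callback (all foo_* names hypothetical):
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->suspend_freq = foo_safe_suspend_freq;
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver foo_driver = {
 *		.init		= foo_cpufreq_init,
 *		.suspend	= cpufreq_generic_suspend,
 *	};
 */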

/**
 * cpufreq_suspend() - Suspend CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in suspend cycle.
 * Because some of the devices (like: i2c, regulators, etc) they use for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
			       cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
			       policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}

/**
 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
 * @flags: Flags to test against the current cpufreq driver's flags.
 *
 * Assumes that the driver is there, so callers must ensure that this is the
 * case.
 */
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}

/**
 * cpufreq_get_current_driver - Return the current driver's name.
 *
 * Return the name string of the currently registered cpufreq driver or NULL if
 * none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - Return current driver data.
 *
 * Return the private data of the currently registered cpufreq driver, or NULL
 * if no cpufreq driver has been registered.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - Register a notifier with cpufreq.
 * @nb: notifier function to register.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Add a notifier to one of two lists: either a list of notifiers that run on
 * clock rate changes (once before and once after every transition), or a list
 * of notifiers that run on changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register().
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
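
/*
 * Usage sketch (editorial addition): a transition notifier sees every
 * frequency change twice, before and after the switch (all foo_* names
 * hypothetical):
 *
 *	static int foo_cpufreq_notify(struct notifier_block *nb,
 *				      unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			foo_update_timing(freqs->policy->cpu, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_cpufreq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * Note that registering a transition notifier fails with -EBUSY while fast
 * frequency switching is enabled, as the code above enforces.
 */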

/**
 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
 * @nb: notifier block to be unregistered.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Remove a notifier from one of the cpufreq notifier lists.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister().
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq;
	int cpu;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	freq = cpufreq_driver->fast_switch(policy, target_freq);

	if (!freq)
		return 0;

	policy->cur = freq;
	arch_set_freq_scale(policy->related_cpus, freq,
			    policy->cpuinfo.max_freq);
	cpufreq_stats_record_transition(policy, freq);

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freq, cpu);
	}

	return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
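
/*
 * Usage sketch (editorial addition): a governor may only take this path when
 * fast switching has been enabled for the policy; otherwise it has to fall
 * back to the regular, possibly sleeping ->target*() path (typically from
 * process context, e.g. a kthread or workqueue):
 *
 *	if (policy->fast_switch_enabled)
 *		cpufreq_driver_fast_switch(policy, next_freq);
 *	else
 *		__cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
 */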

/**
 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
 * @cpu: Target CPU.
 * @min_perf: Minimum (required) performance level (units of @capacity).
 * @target_perf: Target (desired) performance level (units of @capacity).
 * @capacity: Capacity of the target CPU.
 *
 * Carry out a fast performance level switch of @cpu without sleeping.
 *
 * The driver's ->adjust_perf() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select a suitable performance level equal to or above
 * @min_perf and preferably equal to or below @target_perf.
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same CPU and that it will never be called in
 * parallel with either ->target() or ->target_index() or ->fast_switch() for
 * the same CPU.
 */
void cpufreq_driver_adjust_perf(unsigned int cpu,
				unsigned long min_perf,
				unsigned long target_perf,
				unsigned long capacity)
{
	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
}

/**
 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
 *
 * Return 'true' if the ->adjust_perf callback is present for the current
 * driver or 'false' otherwise.
 */
bool cpufreq_driver_has_adjust_perf(void)
{
	return !!cpufreq_driver->adjust_perf;
}

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int restore_freq, intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	restore_freq = policy->cur;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	target_freq = __resolve_freq(policy, target_freq, relation);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
		return 0;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	return __target_index(policy, policy->cached_resolved_idx);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);

	return 0;
}

static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}

int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}

void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
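
/*
 * Usage sketch (editorial addition): a governor module registers itself on
 * load and unregisters on exit (all foo_* names hypothetical):
 *
 *	static struct cpufreq_governor foo_governor = {
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.init	= foo_gov_init,
 *		.exit	= foo_gov_exit,
 *		.start	= foo_gov_start,
 *		.stop	= foo_gov_stop,
 *		.limits	= foo_gov_limits,
 *	};
 *
 *	static int __init foo_gov_register(void)
 *	{
 *		return cpufreq_register_governor(&foo_governor);
 *	}
 */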

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);

/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
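
/*
 * Usage sketch (editorial addition): callers get a self-contained copy of
 * the policy, so no reference needs to be dropped afterwards:
 *
 *	struct cpufreq_policy snapshot;
 *
 *	if (!cpufreq_get_policy(&snapshot, cpu))
 *		pr_info("CPU%u: %u - %u kHz\n", cpu, snapshot.min, snapshot.max);
 */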

/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
 * limits to be set for the policy, update @policy with the verified limits
 * values and either invoke the driver's ->setpolicy() callback (if present) or
 * carry out a governor update for @policy.  That is, run the current governor's
 * ->limits() callback (in the case that the governor is not changed), or
 * replace the governor for @policy (stop the old one and start @new_gov).
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol)
{
	struct cpufreq_policy_data new_data;
	struct cpufreq_governor *old_gov;
	int ret;

	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
	new_data.freq_table = policy->freq_table;
	new_data.cpu = policy->cpu;
	/*
	 * PM QoS framework collects all the requests from users and provide us
	 * the final aggregated value here.
	 */
	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_data.cpu, new_data.min, new_data.max);

	/*
	 * Verify that the CPU speed can be set within these limits and also
	 * that min <= max.
	 */
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	policy->min = new_data.min;
	policy->max = new_data.max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_pol;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_gov == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_gov;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			sched_cpufreq_governor_change(policy, old_gov);
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}

/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers
 * the evaluation of policy notifiers and the cpufreq driver's ->verify()
 * callback for the policy before applying them.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
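
/*
 * Example usage (illustrative sketch): platform code can re-evaluate every
 * policy after firmware changes the allowed frequency range behind the
 * kernel's back, along the lines of:
 *
 *	unsigned int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		cpufreq_update_policy(cpu);
 */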

/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
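
/*
 * Example usage (illustrative sketch): callers that only need the limits
 * refreshed, e.g. after a firmware-imposed cap changes, can simply do:
 *
 *	cpufreq_update_limits(cpu);
 *
 * Drivers such as intel_pstate supply an ->update_limits() callback, so
 * the call takes their fast path instead of a full policy re-evaluation.
 */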

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

	if (!policy->freq_table)
		return -ENXIO;

	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
	if (ret) {
		pr_err("%s: Policy frequency update failed\n", __func__);
		return ret;
	}

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

int cpufreq_boost_trigger_state(int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_lock();
	for_each_active_policy(policy) {
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret)
			goto err_reset_state;
	}
	cpus_read_unlock();

	return 0;

err_reset_state:
	cpus_read_unlock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, state ? "enable" : "disable");

	return ret;
}
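
/*
 * Usage note (for orientation): writing to the global boost knob ends up
 * here, e.g. "echo 1 > /sys/devices/system/cpu/cpufreq/boost" causes the
 * sysfs store handler to call cpufreq_boost_trigger_state(1), which then
 * propagates the new state to every active policy above.
 */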

static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
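
/*
 * Example usage (illustrative sketch): a driver whose frequency table
 * carries boost frequencies can opt into the software boost handling from
 * its ->init() callback; example_cpufreq_init() is hypothetical:
 *
 *	static int example_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		int ret = 0;
 *
 *		if (policy_has_boost_freq(policy))
 *			ret = cpufreq_enable_boost_support();
 *
 *		return ret;
 *	}
 */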

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

/*********************************************************************
 *         REGISTER / UNREGISTER CPUFREQ DRIVER                      *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structure, make sure they are available before proceeding further.
	 */
	if (!get_cpu_device(0))
		return -EPROBE_DEFER;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
	      driver_data->target) ||
	    (driver_data->setpolicy && (driver_data->target_index ||
	      driver_data->target)) ||
	    (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	    (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Mark support for the scheduler's frequency invariance engine for
	 * drivers that implement target(), target_index() or fast_switch().
	 */
	if (!cpufreq_driver->setpolicy) {
		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
		pr_debug("supports frequency invariance");
	}

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (unlikely(list_empty(&cpufreq_policy_list))) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
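
/*
 * Example usage (illustrative sketch): a minimal table-based driver
 * registering itself from module init.  All example_* names are
 * hypothetical; a real driver must provide ->init() and ->verify(), plus
 * either ->setpolicy() or ->target()/->target_index() (but not both), as
 * enforced by the checks above:
 *
 *	static struct cpufreq_driver example_driver = {
 *		.name		= "example",
 *		.init		= example_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *		.get		= cpufreq_generic_get,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return cpufreq_register_driver(&example_driver);
 *	}
 *	module_init(example_module_init);
 */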

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * successfully registered the driver before. Returns zero on success,
 * or -EINVAL if @driver is not the currently registered driver.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
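
/*
 * Example usage (illustrative sketch): the matching module exit for the
 * hypothetical driver registered in the sketch above:
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		cpufreq_unregister_driver(&example_driver);
 *	}
 *	module_exit(example_module_exit);
 */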

static int __init cpufreq_core_init(void)
{
	struct cpufreq_governor *gov = cpufreq_default_governor();

	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	/* strscpy(), unlike strncpy(), always NUL-terminates the result. */
	if (!strlen(default_governor))
		strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);

	return 0;
}
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
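
/*
 * Usage note (for orientation): both module parameters above are set on
 * the kernel command line, e.g.:
 *
 *	cpufreq.off=1
 *	cpufreq.default_governor=schedutil
 */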