// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its lock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
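
/*
 * Illustrative sketch (not part of this file): a sampling governor would
 * typically call get_cpu_idle_time() periodically and compute a load from
 * the deltas; "prev_idle" and "prev_wall" are hypothetical bookkeeping:
 *
 *	u64 wall, idle = get_cpu_idle_time(cpu, &wall, io_busy);
 *	u64 idle_delta = idle - prev_idle, wall_delta = wall - prev_wall;
 *	unsigned int load = 100 * (wall_delta - idle_delta) / wall_delta;
 */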

/**
 * cpufreq_generic_init - Initialize a generic cpufreq policy.
 * @policy: cpufreq policy to initialize.
 * @table: the frequency table to use.
 * @transition_latency: the transition latency, in nanoseconds.
 *
 * Generic ->init() helper for cpufreq drivers of SMP systems on which all
 * CPUs share the same clock: set the frequency table and the transition
 * latency and let the policy span all possible CPUs.
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the same clock and voltage.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
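
/*
 * Illustrative ->init() for a clk-based driver (assumed, not from this
 * file); "foo_clk" and "foo_freq_table" are hypothetical driver data:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_clk;
 *		cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
 *		return 0;
 *	}
 */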

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy.  Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
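
/*
 * Typical usage pattern for the pair above (illustrative only): every
 * successful cpufreq_cpu_get() must be balanced by cpufreq_cpu_put():
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		freq = policy->cur;
 *		cpufreq_cpu_put(policy);
 *	}
 */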

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}
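
/*
 * Illustrative counterpart (not from this file): callers pair
 * cpufreq_cpu_acquire() with cpufreq_cpu_release() and may modify the
 * policy in between, as policy->rwsem is then held for writing:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *	if (policy) {
 *		update_limits(policy);	(hypothetical helper)
 *		cpufreq_cpu_release(policy);
 *	}
 */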

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/*
 * adjust_jiffies - Adjust the system "loops_per_jiffy".
 *
 * This function alters the system "loops_per_jiffy" for the clock speed
 * change.  Note that loops_per_jiffy cannot be updated on SMP systems as
 * each CPU might be scaled differently, so use the arch per-CPU
 * loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
						ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transitions and adjust jiffies.
 * @policy: cpufreq policy the transition takes place on.
 * @freqs: contain details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and adjust_jiffies().
 *
 * It is called twice on all CPU frequency changes that have external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	arch_set_freq_scale(policy->related_cpus,
			    policy->cur,
			    policy->cpuinfo.max_freq);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
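
/*
 * Illustrative driver-side usage (assumed driver, not from this file): a
 * driver flagged CPUFREQ_ASYNC_NOTIFICATION must wrap its own hardware
 * programming in the begin/end pair, e.g.:
 *
 *	struct cpufreq_freqs freqs = {
 *		.old = policy->cur,
 *		.new = policy->freq_table[index].frequency,
 *	};
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = foo_write_pstate(index);	(hypothetical hardware write)
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */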

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		unsigned int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (<
		 * 10 us), the above formula gives a decent transition delay.
		 * But for platforms where transition_latency is in
		 * milliseconds, it ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to
		 * be a reasonable amount of time after which we should
		 * reevaluate the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(str_governor);
	if (!t)
		goto unlock;

	if (!try_module_get(t->owner))
		t = NULL;

unlock:
	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	t = get_governor(str_governor);
	if (t)
		return t;

	if (request_module("cpufreq_%s", str_governor))
		return NULL;

	return get_governor(str_governor);
}

/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);	\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	mutex_lock(&cpufreq_governor_mutex);
	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	if (!fattr->store)
		return -EIO;

	/*
	 * cpus_read_trylock() is used here to work around a circular lock
	 * dependency problem with s2idle.
	 */
	if (!cpus_read_trylock())
		return -EBUSY;

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	cpus_read_unlock();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
	int ret;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = get_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			gov = get_governor(default_governor);
		}

		if (!gov) {
			gov = cpufreq_default_governor();
			__module_get(gov->owner);
		}

	} else {
		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else {
			pol = cpufreq_parse_policy(default_governor);
			/*
			 * In case the default governor is neither "performance"
			 * nor "powersave", fall back to the initial policy
			 * value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	ret = cpufreq_set_policy(policy, gov, pol);
	if (gov)
		module_put(gov->owner);

	return ret;
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
		 * notification, since CPUFREQ_CREATE_POLICY notification was
		 * sent after adding max_freq_req earlier.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	if (!new_policy && cpufreq_driver->online) {
		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}

		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->target_index.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_exit_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req) {
			ret = -ENOMEM;
			goto out_destroy_policy;
		}

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   policy->min);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() on an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on an uninitialized request in
		 * case of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   policy->max);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			ret = -EIO;
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for
	 * a long duration of time, so it's better to set it to a frequency
	 * which is specified in the frequency table. This also makes
	 * cpufreq stats inconsistent, as cpufreq-stats would fail to
	 * register because the current frequency of the CPU isn't found in
	 * the frequency table.
	 *
	 * Because we don't want this change to affect the boot process
	 * badly, we go for the next freq which is >= policy->cur ('cur'
	 * must be set by now, otherwise we will end up setting freq to the
	 * lowest of the table as 'cur' is not initialized yet).
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		unsigned int old_freq = policy->cur;

		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, old_freq);
		if (ret == -EINVAL) {
			ret = __cpufreq_driver_target(policy, old_freq - 1,
						      CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used).
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate a new CPU to own the policy */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start the governor again for the active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus)) {
		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline)
			cpufreq_driver->exit(policy);

		cpufreq_policy_free(policy);
	}
}

/**
 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
 * @policy: Policy managing CPUs.
 * @new_freq: New CPU frequency.
 *
 * Adjust to the current frequency first and clean up later by either calling
 * cpufreq_update_policy(), or scheduling handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case too.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
				      CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
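
/*
 * Illustrative wiring (driver names are hypothetical): a driver opting in
 * to this helper sets policy->suspend_freq from its ->init() callback and
 * points its ->suspend() callback here:
 *
 *	static struct cpufreq_driver foo_driver = {
 *		.suspend	= cpufreq_generic_suspend,
 *	};
 */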

/**
 * cpufreq_suspend() - Suspend CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in suspend cycle.
 * Because some of the devices (like: i2c, regulators, etc) they use for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
			       cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
			       policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}

/**
 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
 * @flags: Flags to test against the current cpufreq driver's flags.
 *
 * Assumes that the driver is there, so callers must ensure that this is the
 * case.
 */
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}

/**
 * cpufreq_get_current_driver - Return the current driver's name.
 *
 * Return the name string of the currently registered cpufreq driver or NULL if
 * none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - Return current driver data.
 *
 * Return the private data of the currently registered cpufreq driver, or NULL
 * if no cpufreq driver has been registered.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - Register a notifier with cpufreq.
 * @nb: notifier function to register.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Add a notifier to one of two lists: either a list of notifiers that are
 * notified about clock rate changes (once before and once after every
 * transition), or a list of notifiers that are notified about changes in
 * cpufreq policy.
 *
 * This function may sleep, and it has the same return conditions as
 * blocking_notifier_chain_register().
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
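
/*
 * Illustrative registration (not from this file), with foo_notifier()
 * being a hypothetical callback:
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_notifier,
 *	};
 *
 *	ret = cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * foo_notifier() is then invoked with CPUFREQ_PRECHANGE and
 * CPUFREQ_POSTCHANGE events and a struct cpufreq_freqs pointer as data.
 */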

/**
 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
 * @nb: notifier block to be unregistered.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Remove a notifier from one of the cpufreq notifier lists.
 *
 * This function may sleep, and it has the same return conditions as
 * blocking_notifier_chain_unregister().
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq;
	int cpu;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	freq = cpufreq_driver->fast_switch(policy, target_freq);

	if (!freq)
		return 0;

	policy->cur = freq;
	arch_set_freq_scale(policy->related_cpus, freq,
			    policy->cpuinfo.max_freq);
	cpufreq_stats_record_transition(policy, freq);

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freq, cpu);
	}

	return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
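
/*
 * Sketch of the intended calling context (assumed, modeled on a
 * schedutil-like governor; not code from this file): the governor checks
 * policy->fast_switch_enabled and then calls this from its update hook
 * without sleeping:
 *
 *	if (policy->fast_switch_enabled) {
 *		freq = cpufreq_driver_fast_switch(policy, target);
 *		if (freq)
 *			record_last_freq(sg_policy, freq);
 *	}
 *
 * record_last_freq() is hypothetical governor bookkeeping.
 */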

/**
 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
 * @cpu: Target CPU.
 * @min_perf: Minimum (required) performance level (units of @capacity).
 * @target_perf: Target (desired) performance level (units of @capacity).
 * @capacity: Capacity of the target CPU.
 *
 * Carry out a fast performance level switch of @cpu without sleeping.
 *
 * The driver's ->adjust_perf() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select a suitable performance level equal to or above
 * @min_perf and preferably equal to or below @target_perf.
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same CPU and that it will never be called in
 * parallel with either ->target() or ->target_index() or ->fast_switch() for
 * the same CPU.
 */
void cpufreq_driver_adjust_perf(unsigned int cpu,
				unsigned long min_perf,
				unsigned long target_perf,
				unsigned long capacity)
{
	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
}

/**
 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
 *
 * Return 'true' if the ->adjust_perf callback is present for the
 * current driver or 'false' otherwise.
 */
bool cpufreq_driver_has_adjust_perf(void)
{
	return !!cpufreq_driver->adjust_perf;
}

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We should not get here otherwise */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int restore_freq, intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	restore_freq = policy->cur;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
		return 0;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
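
/*
 * Illustrative call (not from this file): governors that may sleep use the
 * locked wrapper above, e.g. to jump to the policy maximum:
 *
 *	ret = cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 *
 * CPUFREQ_RELATION_H selects the highest supported frequency at or below
 * the target, while CPUFREQ_RELATION_L selects the lowest one at or above
 * it.
 */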

__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);

	return 0;
}

static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}

int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}

void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
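
/*
 * Illustrative governor registration (module boilerplate omitted, the
 * "foo" callbacks are hypothetical):
 *
 *	static struct cpufreq_governor cpufreq_gov_foo = {
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.start	= foo_start,
 *		.stop	= foo_stop,
 *		.limits	= foo_limits,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return cpufreq_register_governor(&cpufreq_gov_foo);
 *	}
 */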

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);

/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488static int cpufreq_set_policy(struct cpufreq_policy *policy,
2489 struct cpufreq_governor *new_gov,
2490 unsigned int new_pol)
2491{
2492 struct cpufreq_policy_data new_data;
2493 struct cpufreq_governor *old_gov;
2494 int ret;
2495
2496 memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2497 new_data.freq_table = policy->freq_table;
2498 new_data.cpu = policy->cpu;
2499
2500
2501
2502
2503 new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2504 new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2505
2506 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2507 new_data.cpu, new_data.min, new_data.max);
2508
2509
2510
2511
2512
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	policy->min = new_data.min;
	policy->max = new_data.max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_pol;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_gov == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

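	/* save old, working values */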
	old_gov = policy->governor;

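	/* end old governor */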
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

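	/* start new governor */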
	policy->governor = new_gov;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			sched_cpufreq_governor_change(policy, old_gov);
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

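	/* new governor failed, so re-start old one */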
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}

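/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
 * for the policy in question, among other things.
 */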
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

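	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */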
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);

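/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */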
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);

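/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/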
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

	if (!policy->freq_table)
		return -ENXIO;

	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
	if (ret) {
		pr_err("%s: Policy frequency update failed\n", __func__);
		return ret;
	}

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

int cpufreq_boost_trigger_state(int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	get_online_cpus();
	for_each_active_policy(policy) {
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret)
			goto err_reset_state;
	}
	put_online_cpus();

	return 0;

err_reset_state:
	put_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, state ? "enable" : "disable");

	return ret;
}

static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

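	/* This will get removed on driver unregister */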
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

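/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/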
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}

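/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values submitted by
 *	the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver with this core code. Returns zero on
 * success, -EEXIST when another driver got here first (and isn't unregistered
 * in the meantime).
 */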
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

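	/*
	 * The cpufreq core depends heavily on the availability of the device
	 * structure; make sure it is available before proceeding further.
	 */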
	if (!get_cpu_device(0))
		return -EPROBE_DEFER;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

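	/* Protect against concurrent CPU online/offline. */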
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

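	/*
	 * Mark support for the scheduler's frequency invariance engine for
	 * drivers that implement target(), target_index() or fast_switch().
	 */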
	if (!cpufreq_driver->setpolicy) {
		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
		pr_debug("supports frequency invariance");
	}

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
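		/* if all ->init() calls failed, unregister */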
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
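
/*
 * Illustrative registration sketch (not part of this file; "my_driver" and
 * my_init()/my_target_index() are placeholder names). A frequency-table
 * based driver can reuse the generic helpers for ->verify and ->get:
 *
 *	static struct cpufreq_driver my_driver = {
 *		.name		= "my_cpufreq",
 *		.init		= my_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= my_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init my_driver_init(void)
 *	{
 *		return cpufreq_register_driver(&my_driver);
 *	}
 *	module_init(my_driver_init);
 */
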
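/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */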
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

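	/* Protect against concurrent CPU online/offline. */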
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	struct cpufreq_governor *gov = cpufreq_default_governor();

	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	/* Guard against a NULL default governor before copying its name. */
	if (gov && !strlen(default_governor))
		strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);

	return 0;
}
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);