// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  The core of the CPU frequency scaling subsystem.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Macro to iterate over registered governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

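/*
 * The "cpufreq driver" - the arch- or hardware-dependent low-level driver
 * of CPUFreq support.  cpufreq_driver_lock protects the driver pointer and
 * the per-CPU cpufreq_cpu_data pointers.
 */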
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);

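/*
 * Two notifier lists: the "policy" list is involved in the validation
 * process for a new CPU frequency policy; the "transition" list is for
 * kernel code that needs to handle changes to devices when the CPU
 * clock speed changes.
 */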
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
		unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);

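/*
 * cpufreq_generic_init - generic init routine usable by cpufreq drivers
 * @policy: cpufreq policy to initialize
 * @table: frequency table to use for this policy
 * @transition_latency: transition latency, in nanoseconds
 *
 * Set the policy's frequency table and transition latency, and mark all
 * possible CPUs as belonging to this policy.
 */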
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage and frequency.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated with cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

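/*
 * cpufreq_cpu_get - Return the policy for @cpu and mark it busy.
 * @cpu: CPU to find the policy for.
 *
 * Return the policy for @cpu, or NULL if it does not exist.  This also
 * increments the kobject reference count of the policy, so a successful
 * call must be balanced by a call to cpufreq_cpu_put() to decrement it;
 * otherwise the policy can never be freed.
 */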
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

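/*
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */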
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

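/*
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */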
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

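/*
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu
 * and lock it for writing.  If the policy is inactive, drop the reference
 * and return NULL; otherwise return the policy with its rwsem held for
 * writing (to be released with cpufreq_cpu_release()).
 */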
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}

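/*
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock speed
 * change.  Note that loops_per_jiffy is only adjusted on !SMP systems, as
 * the CPUs on an SMP system may be scaled differently.
 */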
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
						ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

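/**
 * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
 * @policy: cpufreq policy the transition is carried out for.
 * @freqs: contain details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function.  It is called twice on all CPU frequency changes that have
 * external effects.
 */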
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
			&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
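
/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not supported".
 */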
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
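
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier
 * registered at this point, as fast frequency switching is quite
 * fundamentally at odds with transition notifiers.
 */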
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
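
/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */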
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast
		 * (< 10 us), the above formula gives a decent transition
		 * delay.  But for platforms where transition_latency is in
		 * milliseconds, it ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to
		 * be a reasonable amount of time after which we should
		 * reevaluate the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static int cpufreq_parse_policy(char *str_governor,
				struct cpufreq_policy *policy)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
		return 0;
	}
	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
		policy->policy = CPUFREQ_POLICY_POWERSAVE;
		return 0;
	}
	return -EINVAL;
}

/*
 * cpufreq_parse_governor - parse a governor string only for drivers with
 * a target.  Try to load the governor module if it is not registered yet.
 */
static int cpufreq_parse_governor(char *str_governor,
				  struct cpufreq_policy *policy)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);

	t = find_governor(str_governor);
	if (!t) {
		int ret;

		mutex_unlock(&cpufreq_governor_mutex);

		ret = request_module("cpufreq_%s", str_governor);
		if (ret)
			return -EINVAL;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);
	}
	if (t && !try_module_get(t->owner))
		t = NULL;

	mutex_unlock(&cpufreq_governor_mutex);

	if (t) {
		policy->governor = t;
		return 0;
	}

	return -EINVAL;
}

/*
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver && cpufreq_driver->setpolicy &&
		 cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/* show_scaling_governor - show the current policy for the specified CPU */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/* store_scaling_governor - store policy for the specified CPU */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		if (cpufreq_parse_policy(str_governor, &new_policy))
			return -EINVAL;
	} else {
		if (cpufreq_parse_governor(str_governor, &new_policy))
			return -EINVAL;
	}

	ret = cpufreq_set_policy(policy, &new_policy);

	if (new_policy.governor)
		module_put(new_policy.governor->owner);

	return ret ? ret : count;
}

/* show_scaling_driver - show the cpufreq driver currently loaded */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/* show_scaling_available_governors - show the available CPUfreq governors */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	/*
	 * cpus_read_trylock() is used here to work around a circular lock
	 * dependency problem with s_active and cpu_hotplug_lock.
	 */
	if (!cpus_read_trylock())
		return -EBUSY;

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	cpus_read_unlock();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}

__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
	return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL, *def_gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	def_gov = cpufreq_default_governor();

	if (has_target()) {
		/*
		 * Update the governor to the one used before hotplug, if it
		 * is still registered.  Note that policy->governor may be
		 * NULL here, so print the name of the governor that was
		 * actually found.
		 */
		gov = find_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			if (!def_gov)
				return -ENODATA;
			gov = def_gov;
		}
		new_policy.governor = gov;
	} else {
		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			new_policy.policy = policy->last_policy;
		} else {
			if (!def_gov)
				return -ENODATA;
			cpufreq_parse_policy(def_gov->name, &new_policy);
		}
	}

	return cpufreq_set_policy(policy, &new_policy);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	struct cpufreq_policy new_policy;

	if (!policy_is_inactive(policy)) {
		new_policy = *policy;
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, &new_policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
				      DEV_PM_QOS_MIN_FREQUENCY);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
				      DEV_PM_QOS_MAX_FREQUENCY);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	dev_pm_qos_remove_notifier(dev, &policy->nb_min,
				   DEV_PM_QOS_MIN_FREQUENCY);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	struct device *dev = get_cpu_device(policy->cpu);
	unsigned long flags;
	int cpu;

	/* Remove the policy from the list. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	dev_pm_qos_remove_notifier(dev, &policy->nb_max,
				   DEV_PM_QOS_MAX_FREQUENCY);
	dev_pm_qos_remove_notifier(dev, &policy->nb_min,
				   DEV_PM_QOS_MIN_FREQUENCY);
	dev_pm_qos_remove_request(policy->max_freq_req);
	dev_pm_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	if (!new_policy && cpufreq_driver->online) {
		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}

		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->setpolicy for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_exit_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * affected cpus must always be the ones which are online.  We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		struct device *dev = get_cpu_device(cpu);

		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req) {
			/* Don't return 0 (from the driver init) on failure. */
			ret = -ENOMEM;
			goto out_destroy_policy;
		}

		ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
					     DEV_PM_QOS_MIN_FREQUENCY,
					     policy->min);
		if (ret < 0) {
			/*
			 * So we don't call dev_pm_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;

			dev_err(dev, "Failed to add min-freq constraint (%d)\n",
				ret);
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * dev_pm_qos_remove_request() on an uninitialized request in
		 * case of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
					     DEV_PM_QOS_MAX_FREQUENCY,
					     policy->max);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			dev_err(dev, "Failed to add max-freq constraint (%d)\n",
				ret);
			goto out_destroy_policy;
		}
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			ret = -EIO;
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}
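
	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core.  In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration of time, so it is better to set it to a frequency
	 * which is specified in the freq-table.  It also keeps cpufreq stats
	 * consistent, as cpufreq-stats would otherwise fail to register.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, else we are dealing with a buggy driver).
	 */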
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at an unknown frequency? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn the user and fix it up. */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}

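/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used).
 */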
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate a new CPU to own the policy. */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start the governor again for the active policy. */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}

/*
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus)) {
		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline)
			cpufreq_driver->exit(policy);

		cpufreq_policy_free(policy);
	}
}
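
/**
 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
 * @policy: Policy managing CPUs.
 * @new_freq: New CPU frequency.
 *
 * Adjust to the current frequency first and clean up later by either calling
 * cpufreq_update_policy(), or scheduling handle_update().
 */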
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}
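
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */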
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
				      CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in suspend cycle.
 * Because some of the devices (like: i2c, regulators, etc) they use for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
			       policy);
	}

suspend:
	cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors
 * that are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
			       policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
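
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */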
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
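
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical
 * sections and it is expected to select the minimum available frequency
 * greater than or equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be
 * invoked twice in parallel for the same policy and that it will never be
 * called in parallel with either ->target() or ->target_index() for the
 * same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */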
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to an intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? The driver should
		 * have reverted back to the initial frequency and so should
		 * we.  Switch back to the previous frequency.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same freq is requested again and so we can save
	 * on a few function calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save the last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->dynamic_switching &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	return 0;
}

static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}

static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}

static void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
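
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */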
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
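
/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_policy: New policy data.
 *
 * Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy
 * the min and max parameters of @new_policy to @policy and either invoke
 * the driver's ->setpolicy() callback (if present) or carry out the
 * associated changes with the help of the governor.
 */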
int cpufreq_set_policy(struct cpufreq_policy *policy,
		       struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * The PM QoS framework collects all the requests from users and
	 * provides us the final aggregated value here.
	 */
	new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
	new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_policy->governor == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			sched_cpufreq_governor_change(policy, old_gov);
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
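
/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the policy if it may have changed.
 */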
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);

/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);

/*********************************************************************
 *                               BOOST                               *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
		if (ret < 0)
			break;
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}
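
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */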
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
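
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */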
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpufreq_core_init);