// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)	 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))
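/*
 * Note: the trailing "if" above makes the caller's loop body run only for
 * policies whose active state matches __active, so call sites need no
 * extra braces or checks.
 */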

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. The lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * Each notifier head carries its own internal locking.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

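/*
 * When a driver sets CPUFREQ_HAVE_GOVERNOR_PER_POLICY, governor tunables
 * live under each policy's sysfs directory instead of the global
 * /sys/devices/system/cpu/cpufreq one.
 */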
bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

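/*
 * Fallback idle-time accounting: derive idle time as wall time minus the
 * busy (user/system/irq/softirq/steal/nice) time reported by kcpustat.
 * Both values are returned in microseconds.
 */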
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

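/*
 * Weak default: architectures that implement frequency-invariant load
 * tracking override this to update their per-CPU frequency scale factor.
 */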
__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
				unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);

/*
 * Generic init() helper for cpufreq drivers of SMP systems that share one
 * clock across all CPUs: record the frequency table and the transition
 * latency, and mark every CPU as belonging to this policy.
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage and frequency.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

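/*
 * Generic ->get() callback for drivers that keep the CPU clock in
 * policy->clk; returns the current clock rate in kHz, or 0 on error.
 */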
unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Return the policy for @cpu, or NULL if it doesn't exist.
 *
 * The policy's kobject reference count is incremented to mark the policy as
 * busy, so a successful call must be balanced by a corresponding
 * cpufreq_cpu_put(); otherwise the policy can never be freed, as that
 * depends on the kobject count.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu
 * and, if the policy returned by it is not NULL, acquire its rwsem for
 * writing. Return the policy if it is active or release it and return NULL
 * otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/*
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock speed
 * change. Note that loops_per_jiffy cannot be updated on SMP systems as
 * each CPU might be scaled differently. So, use the arch per-CPU
 * loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
						ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
 * @policy: cpufreq policy the transition applies to.
 * @freqs: details of the frequency update.
 * @state: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and adjust_jiffies().
 * It is called twice on all CPU frequency changes that have external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not supported".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier
 * registered at this point, as fast frequency switching is quite
 * fundamentally at odds with transition notifiers. Thus if successful, it
 * will make registration of transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * target frequency, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);

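/*
 * Pick the rate limit for governor re-evaluation: a driver-provided
 * transition_delay_us wins; otherwise the hardware transition latency is
 * scaled by LATENCY_MULTIPLIER and capped at 10 ms, with a plain
 * LATENCY_MULTIPLIER (in microseconds) fallback when the latency is unknown.
 */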
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (<
		 * 10 us), the above formula gives a decent transition delay.
		 * But for platforms where transition_latency is in
		 * milliseconds, it ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to
		 * be a reasonable amount of time after which we should
		 * reevaluate the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

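/* Look up a registered governor by (case-insensitive) name. */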
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);

	t = find_governor(str_governor);
	if (!t) {
		int ret;

		mutex_unlock(&cpufreq_governor_mutex);

		ret = request_module("cpufreq_%s", str_governor);
		if (ret)
			return NULL;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);
	}
	if (t && !try_module_get(t->owner))
		t = NULL;

	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}

/*
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver && cpufreq_driver->setpolicy &&
		 cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

/*
 * sysfs write access for scaling_{min,max}_freq: route the new limit
 * through the policy's frequency QoS request.
 */

#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);	\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
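
/*
 * cpuinfo_cur_freq, scaling_cur_freq and bios_limit are not listed above;
 * cpufreq_add_dev_interface() creates them separately, depending on the
 * callbacks the driver provides.
 */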

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	if (!fattr->store)
		return -EIO;

	/*
	 * cpus_read_trylock() is used here to work around a circular lock
	 * dependency problem with respect to the cpufreq_register_driver().
	 */
	if (!cpus_read_trylock())
		return -EBUSY;

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	cpus_read_unlock();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}

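/*
 * Weak stub: the governor selected by CONFIG_CPU_FREQ_DEFAULT_GOV_* provides
 * the real cpufreq_default_governor() implementation.
 */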
__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
	return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *def_gov = cpufreq_default_governor();
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = find_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 policy->governor->name, policy->cpu);
		} else if (def_gov) {
			gov = def_gov;
		} else {
			return -ENODATA;
		}
	} else {
		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else if (def_gov) {
			pol = cpufreq_parse_policy(def_gov->name);
			/*
			 * In case the default governor is neither "performance"
			 * nor "powersave", fall back to the initial policy
			 * value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	return cpufreq_set_policy(policy, gov, pol);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * CPUFREQ_CREATE_POLICY notification is sent only after
		 * successfully adding max_freq_req request.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	if (!new_policy && cpufreq_driver->online) {
		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}

		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->setpolicy for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_exit_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * affected cpus must always be the ones which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req)
			goto out_destroy_policy;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   policy->min);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on an uninitialized request in
		 * case of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   policy->max);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * the frequency table present with cpufreq core. In such cases CPU
	 * might be unstable if it has to run on that frequency for long
	 * duration of time and so its better to set it to a frequency which
	 * is specified in freq-table. This also makes cpufreq stats
	 * inconsistent as cpufreq-stats would fail to register because the
	 * current frequency of CPU isn't found in freq-table.
	 *
	 * Because we don't want this change to effect boot process badly, we
	 * go for the next freq which is >= policy->cur ('cur' must be set by
	 * now, otherwise we will end up setting freq to lowest of the table
	 * as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used).
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate a new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start the governor again for the active policy. */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus)) {
		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline)
			cpufreq_driver->exit(policy);

		cpufreq_policy_free(policy);
	}
}

/**
 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
 * @policy: Policy managing CPUs.
 * @new_freq: New CPU frequency.
 *
 * Adjust to the current frequency first and clean up later by either calling
 * cpufreq_update_policy(), or scheduling handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case too.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current CPU frequency as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
				      CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

/**
 * cpufreq_suspend() - Suspend CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in suspend cycle.
 * Because some of the devices (like: i2c, regulators, etc) they use for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
			       cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors
 * that are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
			       policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}

/**
 * cpufreq_get_current_driver - Return the current driver's name.
 *
 * Return the name string of the currently registered cpufreq driver or NULL
 * if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - Return current driver data.
 *
 * Return the private data of the currently registered cpufreq driver, or
 * NULL if no cpufreq driver has been registered.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - Register a notifier with cpufreq.
 * @nb: notifier function to register.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Add a notifier to one of two lists: either a list of notifiers that run
 * on clock rate changes (once before and once after every transition), or a
 * list of notifiers that run on cpufreq policy changes.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_register().
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
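
/*
 * Example: a minimal, hypothetical transition-notifier client (names are
 * illustrative only, not part of this file):
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_info("CPU%u switched to %u kHz\n",
 *				freqs->policy->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */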

/**
 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
 * @nb: notifier block to be unregistered.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Remove a notifier from one of the cpufreq notifier lists.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_unregister().
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical
 * sections and it is expected to select the minimum available frequency
 * greater than or equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be
 * invoked twice in parallel for the same policy and that it will never be
 * called in parallel with either ->target() or ->target_index() for the
 * same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should
		 * have reverted back to initial frequency and so should we.
		 * Check here for intermediate_freq instead of
		 * get_intermediate, in case we haven't switched to
		 * intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

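/*
 * Locked variant of __cpufreq_driver_target(): takes policy->rwsem for
 * writing around the frequency change.
 */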
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->dynamic_switching &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	return 0;
}

static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}

static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}

static void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);

/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);

/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the new
 * limits to be set for the policy, update @policy with the verified limits
 * values and either invoke the driver's ->setpolicy() callback (if present)
 * or carry out a governor update for @policy.  That is, run the current
 * governor's ->limits() callback (if @new_gov points to the same object as
 * the one in @policy) or replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol)
{
	struct cpufreq_policy_data new_data;
	struct cpufreq_governor *old_gov;
	int ret;

	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
	new_data.freq_table = policy->freq_table;
	new_data.cpu = policy->cpu;
	/*
	 * PM QoS framework collects all the requests from users and provide us
	 * the final aggregated value here.
	 */
	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_data.cpu, new_data.min, new_data.max);

	/*
	 * Verify that the CPU speed can be set within these limits and also
	 * that min <= max.
	 */
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	policy->min = new_data.min;
	policy->max = new_data.max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_pol;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_gov == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_gov;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			sched_cpufreq_governor_change(policy, old_gov);
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}

/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers
 * the evaluation of policy notifiers and the cpufreq driver's ->verify()
 * callback for the policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);

/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);

/*********************************************************************
 *                               BOOST                               *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		ret = freq_qos_update_request(policy->max_freq_req, policy->max);
		if (ret < 0)
			break;
	}

	return ret;
}

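/*
 * Flip the driver's boost_enabled flag and invoke its ->set_boost() hook,
 * rolling the flag back if the driver reports failure.
 */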
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structure, make sure they are available before proceeding further.
	 */
	if (!get_cpu_device(0))
		return -EPROBE_DEFER;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpufreq_core_init);