/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
	if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its read-write lock.
 * This lock also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
		unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage and frequency.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

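/**
 * cpufreq_cpu_get - Return the policy for @cpu and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Return the policy for @cpu, or NULL if none exists. On success the
 * kobject reference count of the policy is incremented to mark it busy,
 * so the caller must release it with a matching cpufreq_cpu_put();
 * otherwise the policy can never be freed.
 */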
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

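/**
 * cpufreq_cpu_put - Decrement the usage count of a policy.
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * Drops the kobject reference taken by cpufreq_cpu_get().
 */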
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

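/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - Adjust the system "loops_per_jiffy".
 *
 * Rescale loops_per_jiffy for a CPU clock speed change. Note that
 * loops_per_jiffy cannot be updated on SMP systems where each CPU
 * might be scaled differently, so this is compiled out there.
 */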
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
						ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

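/**
 * cpufreq_notify_transition - Notify a frequency transition and adjust jiffies.
 * @policy: cpufreq policy the CPUs belong to.
 * @freqs: details of the frequency update.
 * @state: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * Calls the transition notifiers and adjust_jiffies(); it is invoked twice
 * (pre- and post-change) for every frequency change with external effects.
 */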
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if (policy->cur && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}

		for_each_cpu(freqs->cpu, policy->cpus) {
			srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
						 CPUFREQ_PRECHANGE, freqs);
		}

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(freqs->cpu, policy->cpus) {
			trace_cpu_frequency(freqs->new, freqs->cpu);
			srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
						 CPUFREQ_POSTCHANGE, freqs);
		}

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pF\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

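/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier
 * registered at this point, as fast frequency switching is fundamentally
 * at odds with transition notifiers. Thus if successful, it will make
 * registration of transition notifiers fail going forward.
 */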
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

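/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * target frequency, subject to policy (min/max) and driver limitations.
 */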
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast
		 * (< 10 us), the above formula gives a decent transition
		 * delay. But for platforms where transition_latency is in
		 * milliseconds, it ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to
		 * be a reasonable amount of time after which we should
		 * reevaluate the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor,
				  struct cpufreq_policy *policy)
{
	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			policy->policy = CPUFREQ_POLICY_PERFORMANCE;
			return 0;
		}

		if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
			policy->policy = CPUFREQ_POLICY_POWERSAVE;
			return 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);
		if (!t) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);

			ret = request_module("cpufreq_%s", str_governor);
			if (ret)
				return -EINVAL;

			mutex_lock(&cpufreq_governor_mutex);

			t = find_governor(str_governor);
		}
		if (t && !try_module_get(t->owner))
			t = NULL;

		mutex_unlock(&cpufreq_governor_mutex);

		if (t) {
			policy->governor = t;
			return 0;
		}
	}

	return -EINVAL;
}

/**
 * show_##file_name - print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver && cpufreq_driver->setpolicy &&
		 cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy);

/**
 * store_##file_name - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	memcpy(&new_policy, policy, sizeof(*policy));			\
	new_policy.min = policy->user_policy.min;			\
	new_policy.max = policy->user_policy.max;			\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	if (new_policy.governor)
		module_put(new_policy.governor->owner);

	return ret ? ret : count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	/*
	 * cpus_read_trylock() is used here to work around a circular lock
	 * dependency problem with respect to the cpufreq_register_driver().
	 */
	if (!cpus_read_trylock())
		return -EBUSY;

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	cpus_read_unlock();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (!dev)
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}

__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
	return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov) {
		pr_debug("Restoring governor %s for cpu %d\n",
			 policy->governor->name, policy->cpu);
	} else {
		gov = cpufreq_default_governor();
		if (!gov)
			return -ENODATA;
	}

	new_policy.governor = gov;

	/* Use the default policy if there is no last_policy. */
	if (cpufreq_driver->setpolicy) {
		if (policy->last_policy)
			new_policy.policy = policy->last_policy;
		else
			cpufreq_parse_governor(gov->name, &new_policy);
	}

	return cpufreq_set_policy(policy, &new_policy);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;

	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_real_cpus;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/*
	 * Call driver. From then on the cpufreq core must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU.
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto out_free_policy;
	}

	ret = cpufreq_table_validate_and_sort(policy);
	if (ret)
		goto out_exit_policy;

	down_write(&policy->rwsem);

	if (new_policy) {
		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}
	} else {
		policy->min = policy->user_policy.min;
		policy->max = policy->user_policy.max;
	}

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration of time, so it is better to set it to a frequency
	 * which is specified in the freq-table. This also makes cpufreq
	 * stats inconsistent, as cpufreq-stats would fail to register
	 * because the current frequency of the CPU isn't found in the
	 * freq-table.
	 *
	 * Because we don't want this change to affect the boot process
	 * badly, we go for the next freq which is >= policy->cur ('cur'
	 * must be set by now, else we will access an uninitialized variable
	 * below).
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at an unknown frequency? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		/* cpufreq_policy_free() will notify based on this */
		new_policy = false;
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}

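/**
 * cpufreq_add_dev - The cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used).
 */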
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate a new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
	if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}

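/**
 * cpufreq_remove_dev - Remove a CPU device.
 *
 * Removes the cpufreq interface for a CPU device.
 */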
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus))
		cpufreq_policy_free(policy);
}

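/**
 * cpufreq_out_of_sync - Fix up the actual vs. saved CPU frequency difference.
 * @policy: policy managing CPUs.
 * @new_freq: CPU frequency the CPU actually runs at.
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */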
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

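/**
 * cpufreq_quick_get - Get the CPU frequency (in kHz) from policy->cur.
 * @cpu: CPU number.
 *
 * Returns the last known frequency, without an actual hardware check.
 */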
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

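/**
 * cpufreq_quick_get_max - Get the max reported CPU frequency for this CPU.
 * @cpu: CPU number.
 *
 * Just return the max possible frequency for a given CPU.
 */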
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/*
	 * Updating inactive policies is invalid, so avoid doing that.  Also
	 * if fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case too.
	 */
	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
		return ret_freq;

	if (ret_freq && policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* Verify no discrepancy between actual and saved value exists. */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

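/**
 * cpufreq_get - Get the current CPU frequency (in kHz).
 * @cpu: CPU number.
 *
 * Get the CPU's current frequency as reported by the driver.
 */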
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);

		if (!policy_is_inactive(policy))
			ret_freq = __cpufreq_get(policy);

		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	if (!policy->cur) {
		pr_debug("cpufreq: Driver did not initialize current freq\n");
		policy->cur = new_freq;
	} else if (policy->cur != new_freq && has_target()) {
		cpufreq_out_of_sync(policy, new_freq);
	}

	return new_freq;
}

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

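/**
 * cpufreq_generic_suspend - Generic suspend routine for cpufreq drivers.
 * @policy: policy to suspend.
 *
 * Set the frequency of @policy's CPUs to @policy->suspend_freq.
 */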
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

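/**
 * cpufreq_suspend - Suspend CPUFreq governors.
 *
 * Called during system-wide suspend/hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (like i2c, regulators, etc.) they use
 * for changing frequency are suspended quickly after this point.
 */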
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
			       policy);
	}

suspend:
	cpufreq_suspended = true;
}

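/**
 * cpufreq_resume - Resume CPUFreq governors.
 *
 * Called during system-wide suspend/hibernate cycles for resuming governors
 * that were suspended with cpufreq_suspend().
 */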
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
			       policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}

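/**
 * cpufreq_get_current_driver - Return the current driver's name.
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */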
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

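/**
 * cpufreq_get_driver_data - Return the current driver's data.
 *
 * Return the private data of the currently loaded cpufreq driver,
 * or NULL if no cpufreq driver is loaded.
 */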
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

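/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - Register a notifier with cpufreq.
 * @nb: notifier function to register.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Add a notifier to one of two lists: either a list that is notified about
 * clock rate changes (once before and once after the transition), or a list
 * that is notified about changes in cpufreq policy.
 *
 * This function may sleep and has the same return conditions as
 * blocking_notifier_chain_register().
 */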
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

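/**
 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
 * @nb: notifier block to be unregistered.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Remove a notifier from one of the cpufreq notifier lists.
 *
 * This function may sleep and has the same return conditions as
 * blocking_notifier_chain_unregister().
 */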
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

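/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than
 * or equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be
 * invoked twice in parallel for the same policy and that it will never be
 * called in parallel with either ->target() or ->target_index() for the
 * same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */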
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Be sure
		 * to send a POSTCHANGE notification as well.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same freq is called again and so we can save on
	 * a few function calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->dynamic_switching &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	return 0;
}

static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}

static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
		cpufreq_update_current_freq(policy);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}

static void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);

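/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - Get the current cpufreq_policy.
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written.
 * @cpu: CPU whose policy is requested.
 *
 * Reads the current cpufreq policy.
 */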
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);

/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor) {
		pr_debug("cpufreq: governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("cpufreq: governor change\n");
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}

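/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */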
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;

	if (!policy)
		return;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy))
		goto unlock;

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		if (cpufreq_suspended)
			goto unlock;

		new_policy.cur = cpufreq_update_current_freq(policy);
		if (WARN_ON(!new_policy.cur))
			goto unlock;
	}

	cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		down_write(&policy->rwsem);
		policy->user_policy.max = policy->max;
		cpufreq_governor_limits(policy);
		up_write(&policy->rwsem);
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

static bool cpufreq_boost_supported(void)
{
	return likely(cpufreq_driver) && cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unload */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}

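/**
 * cpufreq_register_driver - Register a CPU frequency driver.
 * @driver_data: a struct cpufreq_driver containing the values
 * submitted by the CPU frequency driver.
 *
 * Registers a CPU frequency driver with this core code. Returns zero on
 * success, or -EEXIST when another driver got here first (and isn't
 * unregistered in the meantime).
 */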
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

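/**
 * cpufreq_unregister_driver - Unregister the current CPUFreq driver.
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */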
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpufreq_core_init);