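/*
 * CPU frequency scaling core: policy allocation and lifetime, the sysfs
 * interface exposed for each policy, governor selection and events,
 * frequency-transition notifiers, and scaling-driver registration.
 */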
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);
#define for_each_policy(__policy) \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor) \
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

static DECLARE_RWSEM(cpufreq_rwsem);

static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

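/*
 * Two notifier chains: frequency transitions use an SRCU chain, policy
 * changes a blocking chain.  The SRCU head needs runtime initialization,
 * hence the pure_initcall() below.
 */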
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

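/*
 * get_cpu_idle_time - accumulated idle time of a CPU, in microseconds
 * @cpu: CPU number
 * @wall: if non-NULL, the accumulated wall time is stored here
 * @io_busy: if non-zero, iowait time is accounted as busy rather than idle
 *
 * Uses the nohz idle-time bookkeeping when available and falls back to the
 * jiffy/cpustat based get_cpu_idle_time_jiffy() above when
 * get_cpu_idle_time_us() returns -1.
 */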
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

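/*
 * cpufreq_generic_init - generic ->init() helper for simple drivers
 * @policy: the policy being initialized
 * @table: driver-provided frequency table
 * @transition_latency: transition latency, in nanoseconds
 *
 * Validates and publishes the frequency table, records the transition
 * latency and places every CPU in this policy, i.e. it assumes all CPUs
 * share the same clock.
 */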
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver is assumed to support only configurations where all
	 * CPUs share the same clock, so every CPU joins this policy.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return per_cpu(cpufreq_cpu_data, cpu);
}

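/*
 * cpufreq_cpu_get - take a reference on the policy of a CPU
 *
 * Returns the policy with its kobject reference held and cpufreq_rwsem
 * read-locked, or NULL when that is not possible (no driver registered,
 * no policy for @cpu, or cpufreq being torn down).  Callers must release
 * both via cpufreq_cpu_put().
 */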
210struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
211{
212 struct cpufreq_policy *policy = NULL;
213 unsigned long flags;
214
215 if (cpu >= nr_cpu_ids)
216 return NULL;
217
218 if (!down_read_trylock(&cpufreq_rwsem))
219 return NULL;
220
221
222 read_lock_irqsave(&cpufreq_driver_lock, flags);
223
224 if (cpufreq_driver) {
225
226 policy = per_cpu(cpufreq_cpu_data, cpu);
227 if (policy)
228 kobject_get(&policy->kobj);
229 }
230
231 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
232
233 if (!policy)
234 up_read(&cpufreq_rwsem);
235
236 return policy;
237}
238EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
239
240void cpufreq_cpu_put(struct cpufreq_policy *policy)
241{
242 kobject_put(&policy->kobj);
243 up_read(&cpufreq_rwsem);
244}
245EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
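/*
 * adjust_jiffies - rescale loops_per_jiffy on UP systems whose delay loop
 * depends on the CPU frequency.  On SMP, or when the driver declares
 * CPUFREQ_CONST_LOOPS, this is a no-op.
 */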
259static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
260{
261#ifndef CONFIG_SMP
262 static unsigned long l_p_j_ref;
263 static unsigned int l_p_j_ref_freq;
264
265 if (ci->flags & CPUFREQ_CONST_LOOPS)
266 return;
267
268 if (!l_p_j_ref_freq) {
269 l_p_j_ref = loops_per_jiffy;
270 l_p_j_ref_freq = ci->old;
271 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
272 l_p_j_ref, l_p_j_ref_freq);
273 }
274 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
275 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
276 ci->new);
277 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
278 loops_per_jiffy, ci->new);
279 }
280#endif
281}
282
283static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
284 struct cpufreq_freqs *freqs, unsigned int state)
285{
286 BUG_ON(irqs_disabled());
287
288 if (cpufreq_disabled())
289 return;
290
291 freqs->flags = cpufreq_driver->flags;
292 pr_debug("notification %u of frequency transition to %u kHz\n",
293 state, freqs->new);
294
295 switch (state) {
296
297 case CPUFREQ_PRECHANGE:
298
299
300
301
302 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
303 if ((policy) && (policy->cpu == freqs->cpu) &&
304 (policy->cur) && (policy->cur != freqs->old)) {
305 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
306 freqs->old, policy->cur);
307 freqs->old = policy->cur;
308 }
309 }
310 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
311 CPUFREQ_PRECHANGE, freqs);
312 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
313 break;
314
315 case CPUFREQ_POSTCHANGE:
316 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
317 pr_debug("FREQ: %lu - CPU: %lu\n",
318 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
319 trace_cpu_frequency(freqs->new, freqs->cpu);
320 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
321 CPUFREQ_POSTCHANGE, freqs);
322 if (likely(policy) && likely(policy->cpu == freqs->cpu))
323 policy->cur = freqs->new;
324 break;
325 }
326}
327
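/*
 * cpufreq_notify_transition - call __cpufreq_notify_transition() for every
 * CPU in the policy, so the transition notifier chain and adjust_jiffies()
 * run once per CPU for the given state (CPUFREQ_PRECHANGE or
 * CPUFREQ_POSTCHANGE).
 */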
336static void cpufreq_notify_transition(struct cpufreq_policy *policy,
337 struct cpufreq_freqs *freqs, unsigned int state)
338{
339 for_each_cpu(freqs->cpu, policy->cpus)
340 __cpufreq_notify_transition(policy, freqs, state);
341}
342
343
344static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
345 struct cpufreq_freqs *freqs, int transition_failed)
346{
347 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
348 if (!transition_failed)
349 return;
350
351 swap(freqs->old, freqs->new);
352 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
353 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
354}
355
356void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
357 struct cpufreq_freqs *freqs)
358{
359
360
361
362
363
364
365
366
367
368 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
369 && current == policy->transition_task);
370
371wait:
372 wait_event(policy->transition_wait, !policy->transition_ongoing);
373
374 spin_lock(&policy->transition_lock);
375
376 if (unlikely(policy->transition_ongoing)) {
377 spin_unlock(&policy->transition_lock);
378 goto wait;
379 }
380
381 policy->transition_ongoing = true;
382 policy->transition_task = current;
383
384 spin_unlock(&policy->transition_lock);
385
386 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
387}
388EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
389
390void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
391 struct cpufreq_freqs *freqs, int transition_failed)
392{
393 if (unlikely(WARN_ON(!policy->transition_ongoing)))
394 return;
395
396 cpufreq_notify_post_transition(policy, freqs, transition_failed);
397
398 policy->transition_ongoing = false;
399 policy->transition_task = NULL;
400
401 wake_up(&policy->transition_wait);
402}
403EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
404
405
406
407
408
409static ssize_t show_boost(struct kobject *kobj,
410 struct attribute *attr, char *buf)
411{
412 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
413}
414
415static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
416 const char *buf, size_t count)
417{
418 int ret, enable;
419
420 ret = sscanf(buf, "%d", &enable);
421 if (ret != 1 || enable < 0 || enable > 1)
422 return -EINVAL;
423
424 if (cpufreq_boost_trigger_state(enable)) {
425 pr_err("%s: Cannot %s BOOST!\n",
426 __func__, enable ? "enable" : "disable");
427 return -EINVAL;
428 }
429
430 pr_debug("%s: cpufreq BOOST %s\n",
431 __func__, enable ? "enabled" : "disabled");
432
433 return count;
434}
435define_one_global_rw(boost);
436
437static struct cpufreq_governor *find_governor(const char *str_governor)
438{
439 struct cpufreq_governor *t;
440
441 for_each_governor(t)
442 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
443 return t;
444
445 return NULL;
446}
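/*
 * cpufreq_parse_governor - map a governor string onto either a policy
 * constant (for setpolicy drivers: "performance"/"powersave") or a
 * registered governor, loading the cpufreq_<name> module if needed.
 */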
451static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
452 struct cpufreq_governor **governor)
453{
454 int err = -EINVAL;
455
456 if (!cpufreq_driver)
457 goto out;
458
459 if (cpufreq_driver->setpolicy) {
460 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
461 *policy = CPUFREQ_POLICY_PERFORMANCE;
462 err = 0;
463 } else if (!strncasecmp(str_governor, "powersave",
464 CPUFREQ_NAME_LEN)) {
465 *policy = CPUFREQ_POLICY_POWERSAVE;
466 err = 0;
467 }
468 } else {
469 struct cpufreq_governor *t;
470
471 mutex_lock(&cpufreq_governor_mutex);
472
473 t = find_governor(str_governor);
474
475 if (t == NULL) {
476 int ret;
477
478 mutex_unlock(&cpufreq_governor_mutex);
479 ret = request_module("cpufreq_%s", str_governor);
480 mutex_lock(&cpufreq_governor_mutex);
481
482 if (ret == 0)
483 t = find_governor(str_governor);
484 }
485
486 if (t != NULL) {
487 *governor = t;
488 err = 0;
489 }
490
491 mutex_unlock(&cpufreq_governor_mutex);
492 }
493out:
494 return err;
495}
496
497
498
499
500
501
502
503
504
505#define show_one(file_name, object) \
506static ssize_t show_##file_name \
507(struct cpufreq_policy *policy, char *buf) \
508{ \
509 return sprintf(buf, "%u\n", policy->object); \
510}
511
512show_one(cpuinfo_min_freq, cpuinfo.min_freq);
513show_one(cpuinfo_max_freq, cpuinfo.max_freq);
514show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
515show_one(scaling_min_freq, min);
516show_one(scaling_max_freq, max);
517
518static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
519{
520 ssize_t ret;
521
522 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
523 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
524 else
525 ret = sprintf(buf, "%u\n", policy->cur);
526 return ret;
527}
528
529static int cpufreq_set_policy(struct cpufreq_policy *policy,
530 struct cpufreq_policy *new_policy);
531
532
533
534
535#define store_one(file_name, object) \
536static ssize_t store_##file_name \
537(struct cpufreq_policy *policy, const char *buf, size_t count) \
538{ \
539 int ret, temp; \
540 struct cpufreq_policy new_policy; \
541 \
542 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
543 if (ret) \
544 return -EINVAL; \
545 \
546 ret = sscanf(buf, "%u", &new_policy.object); \
547 if (ret != 1) \
548 return -EINVAL; \
549 \
550 temp = new_policy.object; \
551 ret = cpufreq_set_policy(policy, &new_policy); \
552 if (!ret) \
553 policy->user_policy.object = temp; \
554 \
555 return ret ? ret : count; \
556}
557
558store_one(scaling_min_freq, min);
559store_one(scaling_max_freq, max);
560
561
562
563
564static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
565 char *buf)
566{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (!cur_freq)
		return sprintf(buf, "<unknown>\n");
570 return sprintf(buf, "%u\n", cur_freq);
571}
572
573
574
575
576static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
577{
578 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
579 return sprintf(buf, "powersave\n");
580 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
581 return sprintf(buf, "performance\n");
582 else if (policy->governor)
583 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
584 policy->governor->name);
585 return -EINVAL;
586}
587
588
589
590
591static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
592 const char *buf, size_t count)
593{
594 int ret;
595 char str_governor[16];
596 struct cpufreq_policy new_policy;
597
598 ret = cpufreq_get_policy(&new_policy, policy->cpu);
599 if (ret)
600 return ret;
601
602 ret = sscanf(buf, "%15s", str_governor);
603 if (ret != 1)
604 return -EINVAL;
605
606 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
607 &new_policy.governor))
608 return -EINVAL;
609
610 ret = cpufreq_set_policy(policy, &new_policy);
611
612 policy->user_policy.policy = policy->policy;
613 policy->user_policy.governor = policy->governor;
614
615 if (ret)
616 return ret;
617 else
618 return count;
619}
620
621
622
623
624static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
625{
626 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
627}
628
629
630
631
632static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
633 char *buf)
634{
635 ssize_t i = 0;
636 struct cpufreq_governor *t;
637
638 if (!has_target()) {
639 i += sprintf(buf, "performance powersave");
640 goto out;
641 }
642
643 for_each_governor(t) {
644 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
645 - (CPUFREQ_NAME_LEN + 2)))
646 goto out;
647 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
648 }
649out:
650 i += sprintf(&buf[i], "\n");
651 return i;
652}
653
654ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
655{
656 ssize_t i = 0;
657 unsigned int cpu;
658
659 for_each_cpu(cpu, mask) {
660 if (i)
661 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
662 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
663 if (i >= (PAGE_SIZE - 5))
664 break;
665 }
666 i += sprintf(&buf[i], "\n");
667 return i;
668}
669EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
670
671
672
673
674
675static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
676{
677 return cpufreq_show_cpus(policy->related_cpus, buf);
678}
679
680
681
682
683static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
684{
685 return cpufreq_show_cpus(policy->cpus, buf);
686}
687
688static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
689 const char *buf, size_t count)
690{
691 unsigned int freq = 0;
692 unsigned int ret;
693
694 if (!policy->governor || !policy->governor->store_setspeed)
695 return -EINVAL;
696
697 ret = sscanf(buf, "%u", &freq);
698 if (ret != 1)
699 return -EINVAL;
700
701 policy->governor->store_setspeed(policy, freq);
702
703 return count;
704}
705
706static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
707{
708 if (!policy->governor || !policy->governor->show_setspeed)
709 return sprintf(buf, "<unsupported>\n");
710
711 return policy->governor->show_setspeed(policy, buf);
712}
713
714
715
716
717static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
718{
719 unsigned int limit;
720 int ret;
721 if (cpufreq_driver->bios_limit) {
722 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
723 if (!ret)
724 return sprintf(buf, "%u\n", limit);
725 }
726 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
727}
728
729cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
730cpufreq_freq_attr_ro(cpuinfo_min_freq);
731cpufreq_freq_attr_ro(cpuinfo_max_freq);
732cpufreq_freq_attr_ro(cpuinfo_transition_latency);
733cpufreq_freq_attr_ro(scaling_available_governors);
734cpufreq_freq_attr_ro(scaling_driver);
735cpufreq_freq_attr_ro(scaling_cur_freq);
736cpufreq_freq_attr_ro(bios_limit);
737cpufreq_freq_attr_ro(related_cpus);
738cpufreq_freq_attr_ro(affected_cpus);
739cpufreq_freq_attr_rw(scaling_min_freq);
740cpufreq_freq_attr_rw(scaling_max_freq);
741cpufreq_freq_attr_rw(scaling_governor);
742cpufreq_freq_attr_rw(scaling_setspeed);
743
744static struct attribute *default_attrs[] = {
745 &cpuinfo_min_freq.attr,
746 &cpuinfo_max_freq.attr,
747 &cpuinfo_transition_latency.attr,
748 &scaling_min_freq.attr,
749 &scaling_max_freq.attr,
750 &affected_cpus.attr,
751 &related_cpus.attr,
752 &scaling_governor.attr,
753 &scaling_driver.attr,
754 &scaling_available_governors.attr,
755 &scaling_setspeed.attr,
756 NULL
757};
758
759#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
760#define to_attr(a) container_of(a, struct freq_attr, attr)
761
762static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
763{
764 struct cpufreq_policy *policy = to_policy(kobj);
765 struct freq_attr *fattr = to_attr(attr);
766 ssize_t ret;
767
768 if (!down_read_trylock(&cpufreq_rwsem))
769 return -EINVAL;
770
771 down_read(&policy->rwsem);
772
773 if (fattr->show)
774 ret = fattr->show(policy, buf);
775 else
776 ret = -EIO;
777
778 up_read(&policy->rwsem);
779 up_read(&cpufreq_rwsem);
780
781 return ret;
782}
783
784static ssize_t store(struct kobject *kobj, struct attribute *attr,
785 const char *buf, size_t count)
786{
787 struct cpufreq_policy *policy = to_policy(kobj);
788 struct freq_attr *fattr = to_attr(attr);
789 ssize_t ret = -EINVAL;
790
791 get_online_cpus();
792
793 if (!cpu_online(policy->cpu))
794 goto unlock;
795
796 if (!down_read_trylock(&cpufreq_rwsem))
797 goto unlock;
798
799 down_write(&policy->rwsem);
800
801 if (fattr->store)
802 ret = fattr->store(policy, buf, count);
803 else
804 ret = -EIO;
805
806 up_write(&policy->rwsem);
807
808 up_read(&cpufreq_rwsem);
809unlock:
810 put_online_cpus();
811
812 return ret;
813}
814
815static void cpufreq_sysfs_release(struct kobject *kobj)
816{
817 struct cpufreq_policy *policy = to_policy(kobj);
818 pr_debug("last reference is dropped\n");
819 complete(&policy->kobj_unregister);
820}
821
822static const struct sysfs_ops sysfs_ops = {
823 .show = show,
824 .store = store,
825};
826
827static struct kobj_type ktype_cpufreq = {
828 .sysfs_ops = &sysfs_ops,
829 .default_attrs = default_attrs,
830 .release = cpufreq_sysfs_release,
831};
832
833struct kobject *cpufreq_global_kobject;
834EXPORT_SYMBOL(cpufreq_global_kobject);
835
836static int cpufreq_global_kobject_usage;
837
838int cpufreq_get_global_kobject(void)
839{
840 if (!cpufreq_global_kobject_usage++)
841 return kobject_add(cpufreq_global_kobject,
842 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
843
844 return 0;
845}
846EXPORT_SYMBOL(cpufreq_get_global_kobject);
847
848void cpufreq_put_global_kobject(void)
849{
850 if (!--cpufreq_global_kobject_usage)
851 kobject_del(cpufreq_global_kobject);
852}
853EXPORT_SYMBOL(cpufreq_put_global_kobject);
854
855int cpufreq_sysfs_create_file(const struct attribute *attr)
856{
857 int ret = cpufreq_get_global_kobject();
858
859 if (!ret) {
860 ret = sysfs_create_file(cpufreq_global_kobject, attr);
861 if (ret)
862 cpufreq_put_global_kobject();
863 }
864
865 return ret;
866}
867EXPORT_SYMBOL(cpufreq_sysfs_create_file);
868
869void cpufreq_sysfs_remove_file(const struct attribute *attr)
870{
871 sysfs_remove_file(cpufreq_global_kobject, attr);
872 cpufreq_put_global_kobject();
873}
874EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
875
876
877static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
878{
879 unsigned int j;
880 int ret = 0;
881
882 for_each_cpu(j, policy->cpus) {
883 struct device *cpu_dev;
884
885 if (j == policy->cpu)
886 continue;
887
888 pr_debug("Adding link for CPU: %u\n", j);
889 cpu_dev = get_cpu_device(j);
890 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
891 "cpufreq");
892 if (ret)
893 break;
894 }
895 return ret;
896}
897
898static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
899 struct device *dev)
900{
901 struct freq_attr **drv_attr;
902 int ret = 0;
903
904
905 drv_attr = cpufreq_driver->attr;
906 while (drv_attr && *drv_attr) {
907 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
908 if (ret)
909 return ret;
910 drv_attr++;
911 }
912 if (cpufreq_driver->get) {
913 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
914 if (ret)
915 return ret;
916 }
917
918 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
919 if (ret)
920 return ret;
921
922 if (cpufreq_driver->bios_limit) {
923 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
924 if (ret)
925 return ret;
926 }
927
928 return cpufreq_add_dev_symlink(policy);
929}
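/*
 * cpufreq_init_policy - apply the initial governor (or policy) to a policy,
 * preferring the governor last used by this CPU and falling back to
 * CPUFREQ_DEFAULT_GOVERNOR.  Calls the driver's ->exit() if applying the
 * policy fails.
 */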
931static void cpufreq_init_policy(struct cpufreq_policy *policy)
932{
933 struct cpufreq_governor *gov = NULL;
934 struct cpufreq_policy new_policy;
935 int ret = 0;
936
937 memcpy(&new_policy, policy, sizeof(*policy));
938
939
	/* Prefer the governor last used by this CPU, if any */
	gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
			 gov->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;
946
947 new_policy.governor = gov;
948
949
950 if (cpufreq_driver->setpolicy)
951 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
952
953
954 ret = cpufreq_set_policy(policy, &new_policy);
955 if (ret) {
956 pr_debug("setting policy failed\n");
957 if (cpufreq_driver->exit)
958 cpufreq_driver->exit(policy);
959 }
960}
961
962static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
963 unsigned int cpu, struct device *dev)
964{
965 int ret = 0;
966 unsigned long flags;
967
968 if (has_target()) {
969 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
970 if (ret) {
971 pr_err("%s: Failed to stop governor\n", __func__);
972 return ret;
973 }
974 }
975
976 down_write(&policy->rwsem);
977
978 write_lock_irqsave(&cpufreq_driver_lock, flags);
979
980 cpumask_set_cpu(cpu, policy->cpus);
981 per_cpu(cpufreq_cpu_data, cpu) = policy;
982 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
983
984 up_write(&policy->rwsem);
985
986 if (has_target()) {
987 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
988 if (!ret)
989 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
990
991 if (ret) {
992 pr_err("%s: Failed to start governor\n", __func__);
993 return ret;
994 }
995 }
996
997 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
998}
999
1000static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1001{
1002 struct cpufreq_policy *policy;
1003 unsigned long flags;
1004
1005 read_lock_irqsave(&cpufreq_driver_lock, flags);
1006
1007 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
1008
1009 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1010
1011 if (policy)
1012 policy->governor = NULL;
1013
1014 return policy;
1015}
1016
1017static struct cpufreq_policy *cpufreq_policy_alloc(void)
1018{
1019 struct cpufreq_policy *policy;
1020
1021 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1022 if (!policy)
1023 return NULL;
1024
1025 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1026 goto err_free_policy;
1027
1028 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1029 goto err_free_cpumask;
1030
1031 INIT_LIST_HEAD(&policy->policy_list);
1032 init_rwsem(&policy->rwsem);
1033 spin_lock_init(&policy->transition_lock);
1034 init_waitqueue_head(&policy->transition_wait);
1035 init_completion(&policy->kobj_unregister);
1036 INIT_WORK(&policy->update, handle_update);
1037
1038 return policy;
1039
1040err_free_cpumask:
1041 free_cpumask_var(policy->cpus);
1042err_free_policy:
1043 kfree(policy);
1044
1045 return NULL;
1046}
1047
1048static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1049{
1050 struct kobject *kobj;
1051 struct completion *cmp;
1052
1053 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1054 CPUFREQ_REMOVE_POLICY, policy);
1055
1056 down_read(&policy->rwsem);
1057 kobj = &policy->kobj;
1058 cmp = &policy->kobj_unregister;
1059 up_read(&policy->rwsem);
1060 kobject_put(kobj);
1061
1062
1063
1064
1065
1066
1067 pr_debug("waiting for dropping of refcount\n");
1068 wait_for_completion(cmp);
1069 pr_debug("wait complete\n");
1070}
1071
1072static void cpufreq_policy_free(struct cpufreq_policy *policy)
1073{
1074 free_cpumask_var(policy->related_cpus);
1075 free_cpumask_var(policy->cpus);
1076 kfree(policy);
1077}
1078
1079static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1080 struct device *cpu_dev)
1081{
1082 int ret;
1083
1084 if (WARN_ON(cpu == policy->cpu))
1085 return 0;
1086
1087
1088 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1089 if (ret) {
1090 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1091 return ret;
1092 }
1093
1094 down_write(&policy->rwsem);
1095 policy->cpu = cpu;
1096 up_write(&policy->rwsem);
1097
1098 return 0;
1099}
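/*
 * __cpufreq_add_dev - bring a CPU under cpufreq control.  Either links the
 * CPU into an existing policy that already covers it, or (re)creates a
 * policy: allocate/restore it, call the driver's ->init(), set up sysfs,
 * notify policy listeners and start the governor via cpufreq_init_policy().
 */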
1101static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1102{
1103 unsigned int j, cpu = dev->id;
1104 int ret = -ENOMEM;
1105 struct cpufreq_policy *policy;
1106 unsigned long flags;
1107 bool recover_policy = cpufreq_suspended;
1108
1109 if (cpu_is_offline(cpu))
1110 return 0;
1111
1112 pr_debug("adding CPU %u\n", cpu);
1113
1114
1115
1116 policy = cpufreq_cpu_get_raw(cpu);
1117 if (unlikely(policy))
1118 return 0;
1119
1120 if (!down_read_trylock(&cpufreq_rwsem))
1121 return 0;
1122
1123
1124 read_lock_irqsave(&cpufreq_driver_lock, flags);
1125 for_each_policy(policy) {
1126 if (cpumask_test_cpu(cpu, policy->related_cpus)) {
1127 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1128 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1129 up_read(&cpufreq_rwsem);
1130 return ret;
1131 }
1132 }
1133 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1134
1135
1136
1137
1138
1139 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1140 if (!policy) {
1141 recover_policy = false;
1142 policy = cpufreq_policy_alloc();
1143 if (!policy)
1144 goto nomem_out;
1145 }
1146
1147
1148
1149
1150
1151
1152
1153 if (recover_policy && cpu != policy->cpu)
1154 WARN_ON(update_policy_cpu(policy, cpu, dev));
1155 else
1156 policy->cpu = cpu;
1157
1158 cpumask_copy(policy->cpus, cpumask_of(cpu));
1159
1160
1161
1162
1163 ret = cpufreq_driver->init(policy);
1164 if (ret) {
1165 pr_debug("initialization failed\n");
1166 goto err_set_policy_cpu;
1167 }
1168
1169 down_write(&policy->rwsem);
1170
1171
1172 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1173
1174
1175
1176
1177
1178 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1179
1180 if (!recover_policy) {
1181 policy->user_policy.min = policy->min;
1182 policy->user_policy.max = policy->max;
1183
1184
1185 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1186 &dev->kobj, "cpufreq");
1187 if (ret) {
1188 pr_err("%s: failed to init policy->kobj: %d\n",
1189 __func__, ret);
1190 goto err_init_policy_kobj;
1191 }
1192 }
1193
1194 write_lock_irqsave(&cpufreq_driver_lock, flags);
1195 for_each_cpu(j, policy->cpus)
1196 per_cpu(cpufreq_cpu_data, j) = policy;
1197 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1198
1199 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1200 policy->cur = cpufreq_driver->get(policy->cpu);
1201 if (!policy->cur) {
1202 pr_err("%s: ->get() failed\n", __func__);
1203 goto err_get_freq;
1204 }
1205 }
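	/*
	 * If the frequency currently reported by the driver is not listed in
	 * the frequency table, switch to a listed frequency now.  Drivers
	 * that cannot guarantee a table match at this point set
	 * CPUFREQ_NEED_INITIAL_FREQ_CHECK to request this fixup.
	 */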
1225 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1226 && has_target()) {
1227
1228 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1229 if (ret == -EINVAL) {
1230
			pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
				__func__, policy->cpu, policy->cur);
1233 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1234 CPUFREQ_RELATION_L);
1235
1236
1237
1238
1239
1240
1241 BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
				__func__, policy->cpu, policy->cur);
1244 }
1245 }
1246
1247 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1248 CPUFREQ_START, policy);
1249
1250 if (!recover_policy) {
1251 ret = cpufreq_add_dev_interface(policy, dev);
1252 if (ret)
1253 goto err_out_unregister;
1254 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1255 CPUFREQ_CREATE_POLICY, policy);
1256 }
1257
1258 write_lock_irqsave(&cpufreq_driver_lock, flags);
1259 list_add(&policy->policy_list, &cpufreq_policy_list);
1260 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1261
1262 cpufreq_init_policy(policy);
1263
1264 if (!recover_policy) {
1265 policy->user_policy.policy = policy->policy;
1266 policy->user_policy.governor = policy->governor;
1267 }
1268 up_write(&policy->rwsem);
1269
1270 kobject_uevent(&policy->kobj, KOBJ_ADD);
1271
1272 up_read(&cpufreq_rwsem);
1273
1274
1275 if (cpufreq_driver->ready)
1276 cpufreq_driver->ready(policy);
1277
1278 pr_debug("initialization complete\n");
1279
1280 return 0;
1281
1282err_out_unregister:
1283err_get_freq:
1284 write_lock_irqsave(&cpufreq_driver_lock, flags);
1285 for_each_cpu(j, policy->cpus)
1286 per_cpu(cpufreq_cpu_data, j) = NULL;
1287 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1288
1289 if (!recover_policy) {
1290 kobject_put(&policy->kobj);
1291 wait_for_completion(&policy->kobj_unregister);
1292 }
1293err_init_policy_kobj:
1294 up_write(&policy->rwsem);
1295
1296 if (cpufreq_driver->exit)
1297 cpufreq_driver->exit(policy);
1298err_set_policy_cpu:
1299 if (recover_policy) {
1300
1301 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1302 cpufreq_policy_put_kobj(policy);
1303 }
1304 cpufreq_policy_free(policy);
1305
1306nomem_out:
1307 up_read(&cpufreq_rwsem);
1308
1309 return ret;
1310}
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1322{
1323 return __cpufreq_add_dev(dev, sif);
1324}
1325
1326static int __cpufreq_remove_dev_prepare(struct device *dev,
1327 struct subsys_interface *sif)
1328{
1329 unsigned int cpu = dev->id, cpus;
1330 int ret;
1331 unsigned long flags;
1332 struct cpufreq_policy *policy;
1333
1334 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1335
1336 write_lock_irqsave(&cpufreq_driver_lock, flags);
1337
1338 policy = per_cpu(cpufreq_cpu_data, cpu);
1339
1340
1341 if (cpufreq_suspended)
1342 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1343
1344 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1345
1346 if (!policy) {
1347 pr_debug("%s: No cpu_data found\n", __func__);
1348 return -EINVAL;
1349 }
1350
1351 if (has_target()) {
1352 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1353 if (ret) {
1354 pr_err("%s: Failed to stop governor\n", __func__);
1355 return ret;
1356 }
1357
1358 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1359 policy->governor->name, CPUFREQ_NAME_LEN);
1360 }
1361
1362 down_read(&policy->rwsem);
1363 cpus = cpumask_weight(policy->cpus);
1364 up_read(&policy->rwsem);
1365
1366 if (cpu != policy->cpu) {
1367 sysfs_remove_link(&dev->kobj, "cpufreq");
1368 } else if (cpus > 1) {
1369
1370 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1371 struct device *cpu_dev = get_cpu_device(new_cpu);
1372
1373 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1374 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1375 if (ret) {
1376 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1377 "cpufreq"))
1378 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1379 __func__, cpu_dev->id);
1380 return ret;
1381 }
1382
1383 if (!cpufreq_suspended)
1384 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1385 __func__, new_cpu, cpu);
1386 } else if (cpufreq_driver->stop_cpu) {
1387 cpufreq_driver->stop_cpu(policy);
1388 }
1389
1390 return 0;
1391}
1392
1393static int __cpufreq_remove_dev_finish(struct device *dev,
1394 struct subsys_interface *sif)
1395{
1396 unsigned int cpu = dev->id, cpus;
1397 int ret;
1398 unsigned long flags;
1399 struct cpufreq_policy *policy;
1400
1401 write_lock_irqsave(&cpufreq_driver_lock, flags);
1402 policy = per_cpu(cpufreq_cpu_data, cpu);
1403 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1404 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1405
1406 if (!policy) {
1407 pr_debug("%s: No cpu_data found\n", __func__);
1408 return -EINVAL;
1409 }
1410
1411 down_write(&policy->rwsem);
1412 cpus = cpumask_weight(policy->cpus);
1413
1414 if (cpus > 1)
1415 cpumask_clear_cpu(cpu, policy->cpus);
1416 up_write(&policy->rwsem);
1417
1418
1419 if (cpus == 1) {
1420 if (has_target()) {
1421 ret = __cpufreq_governor(policy,
1422 CPUFREQ_GOV_POLICY_EXIT);
1423 if (ret) {
1424 pr_err("%s: Failed to exit governor\n",
1425 __func__);
1426 return ret;
1427 }
1428 }
1429
1430 if (!cpufreq_suspended)
1431 cpufreq_policy_put_kobj(policy);
1432
1433
1434
1435
1436
1437
1438 if (cpufreq_driver->exit)
1439 cpufreq_driver->exit(policy);
1440
1441
1442 write_lock_irqsave(&cpufreq_driver_lock, flags);
1443 list_del(&policy->policy_list);
1444 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1445
1446 if (!cpufreq_suspended)
1447 cpufreq_policy_free(policy);
1448 } else if (has_target()) {
1449 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1450 if (!ret)
1451 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1452
1453 if (ret) {
1454 pr_err("%s: Failed to start governor\n", __func__);
1455 return ret;
1456 }
1457 }
1458
1459 return 0;
1460}
1461
1462
1463
1464
1465
1466
1467static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1468{
1469 unsigned int cpu = dev->id;
1470 int ret;
1471
1472 if (cpu_is_offline(cpu))
1473 return 0;
1474
1475 ret = __cpufreq_remove_dev_prepare(dev, sif);
1476
1477 if (!ret)
1478 ret = __cpufreq_remove_dev_finish(dev, sif);
1479
1480 return ret;
1481}
1482
1483static void handle_update(struct work_struct *work)
1484{
1485 struct cpufreq_policy *policy =
1486 container_of(work, struct cpufreq_policy, update);
1487 unsigned int cpu = policy->cpu;
1488 pr_debug("handle_update for cpu %u called\n", cpu);
1489 cpufreq_update_policy(cpu);
1490}
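/*
 * cpufreq_out_of_sync - fix up policy->cur when the actual CPU frequency
 * disagrees with what cpufreq believes (e.g. it was changed behind our
 * back by the BIOS), by issuing a synthetic PRECHANGE/POSTCHANGE
 * transition cycle.
 */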
1501static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1502 unsigned int new_freq)
1503{
1504 struct cpufreq_freqs freqs;
1505
	pr_debug("Warning: CPU frequency out of sync: cpufreq assumed %u kHz, actual frequency is %u kHz\n",
		 policy->cur, new_freq);
1508
1509 freqs.old = policy->cur;
1510 freqs.new = new_freq;
1511
1512 cpufreq_freq_transition_begin(policy, &freqs);
1513 cpufreq_freq_transition_end(policy, &freqs, 0);
1514}
1515
1516
1517
1518
1519
1520
1521
1522
1523unsigned int cpufreq_quick_get(unsigned int cpu)
1524{
1525 struct cpufreq_policy *policy;
1526 unsigned int ret_freq = 0;
1527
1528 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1529 return cpufreq_driver->get(cpu);
1530
1531 policy = cpufreq_cpu_get(cpu);
1532 if (policy) {
1533 ret_freq = policy->cur;
1534 cpufreq_cpu_put(policy);
1535 }
1536
1537 return ret_freq;
1538}
1539EXPORT_SYMBOL(cpufreq_quick_get);
1540
1541
1542
1543
1544
1545
1546
1547unsigned int cpufreq_quick_get_max(unsigned int cpu)
1548{
1549 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1550 unsigned int ret_freq = 0;
1551
1552 if (policy) {
1553 ret_freq = policy->max;
1554 cpufreq_cpu_put(policy);
1555 }
1556
1557 return ret_freq;
1558}
1559EXPORT_SYMBOL(cpufreq_quick_get_max);
1560
1561static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1562{
1563 unsigned int ret_freq = 0;
1564
1565 if (!cpufreq_driver->get)
1566 return ret_freq;
1567
1568 ret_freq = cpufreq_driver->get(policy->cpu);
1569
1570 if (ret_freq && policy->cur &&
1571 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1572
1573
1574 if (unlikely(ret_freq != policy->cur)) {
1575 cpufreq_out_of_sync(policy, ret_freq);
1576 schedule_work(&policy->update);
1577 }
1578 }
1579
1580 return ret_freq;
1581}
1582
1583
1584
1585
1586
1587
1588
1589unsigned int cpufreq_get(unsigned int cpu)
1590{
1591 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1592 unsigned int ret_freq = 0;
1593
1594 if (policy) {
1595 down_read(&policy->rwsem);
1596 ret_freq = __cpufreq_get(policy);
1597 up_read(&policy->rwsem);
1598
1599 cpufreq_cpu_put(policy);
1600 }
1601
1602 return ret_freq;
1603}
1604EXPORT_SYMBOL(cpufreq_get);
1605
1606static struct subsys_interface cpufreq_interface = {
1607 .name = "cpufreq",
1608 .subsys = &cpu_subsys,
1609 .add_dev = cpufreq_add_dev,
1610 .remove_dev = cpufreq_remove_dev,
1611};
1612
1613
1614
1615
1616
1617int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1618{
1619 int ret;
1620
1621 if (!policy->suspend_freq) {
1622 pr_err("%s: suspend_freq can't be zero\n", __func__);
1623 return -EINVAL;
1624 }
1625
1626 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1627 policy->suspend_freq);
1628
1629 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1630 CPUFREQ_RELATION_H);
1631 if (ret)
1632 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1633 __func__, policy->suspend_freq, ret);
1634
1635 return ret;
1636}
1637EXPORT_SYMBOL(cpufreq_generic_suspend);
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647void cpufreq_suspend(void)
1648{
1649 struct cpufreq_policy *policy;
1650
1651 if (!cpufreq_driver)
1652 return;
1653
1654 if (!has_target())
1655 goto suspend;
1656
1657 pr_debug("%s: Suspending Governors\n", __func__);
1658
1659 for_each_policy(policy) {
1660 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1661 pr_err("%s: Failed to stop governor for policy: %p\n",
1662 __func__, policy);
1663 else if (cpufreq_driver->suspend
1664 && cpufreq_driver->suspend(policy))
1665 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1666 policy);
1667 }
1668
1669suspend:
1670 cpufreq_suspended = true;
1671}
1672
1673
1674
1675
1676
1677
1678
1679void cpufreq_resume(void)
1680{
1681 struct cpufreq_policy *policy;
1682
1683 if (!cpufreq_driver)
1684 return;
1685
1686 cpufreq_suspended = false;
1687
1688 if (!has_target())
1689 return;
1690
1691 pr_debug("%s: Resuming Governors\n", __func__);
1692
1693 for_each_policy(policy) {
1694 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1695 pr_err("%s: Failed to resume driver: %p\n", __func__,
1696 policy);
1697 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1698 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1699 pr_err("%s: Failed to start governor for policy: %p\n",
1700 __func__, policy);
1701 }
1702
1703
1704
1705
1706
1707
1708 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1709 if (WARN_ON(!policy))
1710 return;
1711
1712 schedule_work(&policy->update);
1713}
1714
1715
1716
1717
1718
1719
1720
1721const char *cpufreq_get_current_driver(void)
1722{
1723 if (cpufreq_driver)
1724 return cpufreq_driver->name;
1725
1726 return NULL;
1727}
1728EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1729
1730
1731
1732
1733
1734
1735
1736void *cpufreq_get_driver_data(void)
1737{
1738 if (cpufreq_driver)
1739 return cpufreq_driver->driver_data;
1740
1741 return NULL;
1742}
1743EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1763{
1764 int ret;
1765
1766 if (cpufreq_disabled())
1767 return -EINVAL;
1768
1769 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1770
1771 switch (list) {
1772 case CPUFREQ_TRANSITION_NOTIFIER:
1773 ret = srcu_notifier_chain_register(
1774 &cpufreq_transition_notifier_list, nb);
1775 break;
1776 case CPUFREQ_POLICY_NOTIFIER:
1777 ret = blocking_notifier_chain_register(
1778 &cpufreq_policy_notifier_list, nb);
1779 break;
1780 default:
1781 ret = -EINVAL;
1782 }
1783
1784 return ret;
1785}
1786EXPORT_SYMBOL(cpufreq_register_notifier);
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1799{
1800 int ret;
1801
1802 if (cpufreq_disabled())
1803 return -EINVAL;
1804
1805 switch (list) {
1806 case CPUFREQ_TRANSITION_NOTIFIER:
1807 ret = srcu_notifier_chain_unregister(
1808 &cpufreq_transition_notifier_list, nb);
1809 break;
1810 case CPUFREQ_POLICY_NOTIFIER:
1811 ret = blocking_notifier_chain_unregister(
1812 &cpufreq_policy_notifier_list, nb);
1813 break;
1814 default:
1815 ret = -EINVAL;
1816 }
1817
1818 return ret;
1819}
1820EXPORT_SYMBOL(cpufreq_unregister_notifier);
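/*
 * __target_intermediate - switch to the driver's intermediate frequency for
 * the given table index.  Returns 0 when the driver reports that no
 * intermediate step is needed (->get_intermediate() returned 0), or a
 * negative error code if the intermediate switch fails.
 */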
1828static int __target_intermediate(struct cpufreq_policy *policy,
1829 struct cpufreq_freqs *freqs, int index)
1830{
1831 int ret;
1832
1833 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1834
1835
1836 if (!freqs->new)
1837 return 0;
1838
1839 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1840 __func__, policy->cpu, freqs->old, freqs->new);
1841
1842 cpufreq_freq_transition_begin(policy, freqs);
1843 ret = cpufreq_driver->target_intermediate(policy, index);
1844 cpufreq_freq_transition_end(policy, freqs, ret);
1845
1846 if (ret)
1847 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1848 __func__, ret);
1849
1850 return ret;
1851}
1852
1853static int __target_index(struct cpufreq_policy *policy,
1854 struct cpufreq_frequency_table *freq_table, int index)
1855{
1856 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1857 unsigned int intermediate_freq = 0;
1858 int retval = -EINVAL;
1859 bool notify;
1860
1861 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1862 if (notify) {
1863
1864 if (cpufreq_driver->get_intermediate) {
1865 retval = __target_intermediate(policy, &freqs, index);
1866 if (retval)
1867 return retval;
1868
1869 intermediate_freq = freqs.new;
1870
1871 if (intermediate_freq)
1872 freqs.old = freqs.new;
1873 }
1874
1875 freqs.new = freq_table[index].frequency;
1876 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1877 __func__, policy->cpu, freqs.old, freqs.new);
1878
1879 cpufreq_freq_transition_begin(policy, &freqs);
1880 }
1881
1882 retval = cpufreq_driver->target_index(policy, index);
1883 if (retval)
1884 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1885 retval);
1886
1887 if (notify) {
1888 cpufreq_freq_transition_end(policy, &freqs, retval);
1889
1890
1891
1892
1893
1894
1895
1896 if (unlikely(retval && intermediate_freq)) {
1897 freqs.old = intermediate_freq;
1898 freqs.new = policy->restore_freq;
1899 cpufreq_freq_transition_begin(policy, &freqs);
1900 cpufreq_freq_transition_end(policy, &freqs, 0);
1901 }
1902 }
1903
1904 return retval;
1905}
1906
1907int __cpufreq_driver_target(struct cpufreq_policy *policy,
1908 unsigned int target_freq,
1909 unsigned int relation)
1910{
1911 unsigned int old_target_freq = target_freq;
1912 int retval = -EINVAL;
1913
1914 if (cpufreq_disabled())
1915 return -ENODEV;
1916
1917
1918 if (target_freq > policy->max)
1919 target_freq = policy->max;
1920 if (target_freq < policy->min)
1921 target_freq = policy->min;
1922
1923 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1924 policy->cpu, target_freq, relation, old_target_freq);
1925
1926
1927
1928
1929
1930
1931
1932 if (target_freq == policy->cur)
1933 return 0;
1934
1935
1936 policy->restore_freq = policy->cur;
1937
1938 if (cpufreq_driver->target)
1939 retval = cpufreq_driver->target(policy, target_freq, relation);
1940 else if (cpufreq_driver->target_index) {
1941 struct cpufreq_frequency_table *freq_table;
1942 int index;
1943
1944 freq_table = cpufreq_frequency_get_table(policy->cpu);
1945 if (unlikely(!freq_table)) {
1946 pr_err("%s: Unable to find freq_table\n", __func__);
1947 goto out;
1948 }
1949
1950 retval = cpufreq_frequency_table_target(policy, freq_table,
1951 target_freq, relation, &index);
1952 if (unlikely(retval)) {
1953 pr_err("%s: Unable to find matching freq\n", __func__);
1954 goto out;
1955 }
1956
1957 if (freq_table[index].frequency == policy->cur) {
1958 retval = 0;
1959 goto out;
1960 }
1961
1962 retval = __target_index(policy, freq_table, index);
1963 }
1964
1965out:
1966 return retval;
1967}
1968EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1969
1970int cpufreq_driver_target(struct cpufreq_policy *policy,
1971 unsigned int target_freq,
1972 unsigned int relation)
1973{
1974 int ret = -EINVAL;
1975
1976 down_write(&policy->rwsem);
1977
1978 ret = __cpufreq_driver_target(policy, target_freq, relation);
1979
1980 up_write(&policy->rwsem);
1981
1982 return ret;
1983}
1984EXPORT_SYMBOL_GPL(cpufreq_driver_target);
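/*
 * __cpufreq_governor - forward a governor event (POLICY_INIT/START/STOP/
 * LIMITS/POLICY_EXIT) to policy->governor, tracking governor_enabled and
 * the governor module reference, and falling back to the performance
 * governor (when built in) if the hardware's transition latency exceeds
 * what the requested governor can handle.  No-op while cpufreq is
 * suspended.
 */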
1986static int __cpufreq_governor(struct cpufreq_policy *policy,
1987 unsigned int event)
1988{
1989 int ret;
1990
1991
1992
1993
1994
1995#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1996 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1997#else
1998 struct cpufreq_governor *gov = NULL;
1999#endif
2000
2001
2002 if (cpufreq_suspended)
2003 return 0;
2004
2005
2006
2007
2008 if (!policy->governor)
2009 return -EINVAL;
2010
2011 if (policy->governor->max_transition_latency &&
2012 policy->cpuinfo.transition_latency >
2013 policy->governor->max_transition_latency) {
2014 if (!gov)
2015 return -EINVAL;
2016 else {
			pr_warn("%s governor failed: hardware transition latency too long, falling back to %s governor\n",
				policy->governor->name, gov->name);
2019 policy->governor = gov;
2020 }
2021 }
2022
2023 if (event == CPUFREQ_GOV_POLICY_INIT)
2024 if (!try_module_get(policy->governor->owner))
2025 return -EINVAL;
2026
2027 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2028 policy->cpu, event);
2029
2030 mutex_lock(&cpufreq_governor_lock);
2031 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2032 || (!policy->governor_enabled
2033 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2034 mutex_unlock(&cpufreq_governor_lock);
2035 return -EBUSY;
2036 }
2037
2038 if (event == CPUFREQ_GOV_STOP)
2039 policy->governor_enabled = false;
2040 else if (event == CPUFREQ_GOV_START)
2041 policy->governor_enabled = true;
2042
2043 mutex_unlock(&cpufreq_governor_lock);
2044
2045 ret = policy->governor->governor(policy, event);
2046
2047 if (!ret) {
2048 if (event == CPUFREQ_GOV_POLICY_INIT)
2049 policy->governor->initialized++;
2050 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2051 policy->governor->initialized--;
2052 } else {
2053
2054 mutex_lock(&cpufreq_governor_lock);
2055 if (event == CPUFREQ_GOV_STOP)
2056 policy->governor_enabled = true;
2057 else if (event == CPUFREQ_GOV_START)
2058 policy->governor_enabled = false;
2059 mutex_unlock(&cpufreq_governor_lock);
2060 }
2061
2062 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2063 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2064 module_put(policy->governor->owner);
2065
2066 return ret;
2067}
2068
2069int cpufreq_register_governor(struct cpufreq_governor *governor)
2070{
2071 int err;
2072
2073 if (!governor)
2074 return -EINVAL;
2075
2076 if (cpufreq_disabled())
2077 return -ENODEV;
2078
2079 mutex_lock(&cpufreq_governor_mutex);
2080
2081 governor->initialized = 0;
2082 err = -EBUSY;
2083 if (!find_governor(governor->name)) {
2084 err = 0;
2085 list_add(&governor->governor_list, &cpufreq_governor_list);
2086 }
2087
2088 mutex_unlock(&cpufreq_governor_mutex);
2089 return err;
2090}
2091EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2092
2093void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2094{
2095 int cpu;
2096
2097 if (!governor)
2098 return;
2099
2100 if (cpufreq_disabled())
2101 return;
2102
2103 for_each_present_cpu(cpu) {
2104 if (cpu_online(cpu))
2105 continue;
2106 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2107 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2108 }
2109
2110 mutex_lock(&cpufreq_governor_mutex);
2111 list_del(&governor->governor_list);
2112 mutex_unlock(&cpufreq_governor_mutex);
2113 return;
2114}
2115EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2130{
2131 struct cpufreq_policy *cpu_policy;
2132 if (!policy)
2133 return -EINVAL;
2134
2135 cpu_policy = cpufreq_cpu_get(cpu);
2136 if (!cpu_policy)
2137 return -EINVAL;
2138
2139 memcpy(policy, cpu_policy, sizeof(*policy));
2140
2141 cpufreq_cpu_put(cpu_policy);
2142 return 0;
2143}
2144EXPORT_SYMBOL(cpufreq_get_policy);
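/*
 * cpufreq_set_policy - apply new min/max limits and, if it changed, the new
 * governor to @policy.  The limits are run through the driver's ->verify()
 * and the CPUFREQ_ADJUST/CPUFREQ_INCOMPATIBLE/CPUFREQ_NOTIFY policy
 * notifiers before taking effect; a failed governor switch is rolled back
 * to the old governor.
 */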
2150static int cpufreq_set_policy(struct cpufreq_policy *policy,
2151 struct cpufreq_policy *new_policy)
2152{
2153 struct cpufreq_governor *old_gov;
2154 int ret;
2155
2156 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2157 new_policy->cpu, new_policy->min, new_policy->max);
2158
2159 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2160
2161 if (new_policy->min > policy->max || new_policy->max < policy->min)
2162 return -EINVAL;
2163
2164
2165 ret = cpufreq_driver->verify(new_policy);
2166 if (ret)
2167 return ret;
2168
2169
2170 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2171 CPUFREQ_ADJUST, new_policy);
2172
2173
2174 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2175 CPUFREQ_INCOMPATIBLE, new_policy);
2176
2177
2178
2179
2180
2181 ret = cpufreq_driver->verify(new_policy);
2182 if (ret)
2183 return ret;
2184
2185
2186 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2187 CPUFREQ_NOTIFY, new_policy);
2188
2189 policy->min = new_policy->min;
2190 policy->max = new_policy->max;
2191
2192 pr_debug("new min and max freqs are %u - %u kHz\n",
2193 policy->min, policy->max);
2194
2195 if (cpufreq_driver->setpolicy) {
2196 policy->policy = new_policy->policy;
2197 pr_debug("setting range\n");
2198 return cpufreq_driver->setpolicy(new_policy);
2199 }
2200
2201 if (new_policy->governor == policy->governor)
2202 goto out;
2203
2204 pr_debug("governor switch\n");
2205
2206
2207 old_gov = policy->governor;
2208
2209 if (old_gov) {
2210 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2211 up_write(&policy->rwsem);
2212 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2213 down_write(&policy->rwsem);
2214 }
2215
2216
2217 policy->governor = new_policy->governor;
2218 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2219 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2220 goto out;
2221
2222 up_write(&policy->rwsem);
2223 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2224 down_write(&policy->rwsem);
2225 }
2226
2227
2228 pr_debug("starting governor %s failed\n", policy->governor->name);
2229 if (old_gov) {
2230 policy->governor = old_gov;
2231 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2232 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2233 }
2234
2235 return -EINVAL;
2236
2237 out:
2238 pr_debug("governor: change or update limits\n");
2239 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2240}
2241
2242
2243
2244
2245
2246
2247
2248
2249int cpufreq_update_policy(unsigned int cpu)
2250{
2251 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2252 struct cpufreq_policy new_policy;
2253 int ret;
2254
2255 if (!policy)
2256 return -ENODEV;
2257
2258 down_write(&policy->rwsem);
2259
2260 pr_debug("updating policy for CPU %u\n", cpu);
2261 memcpy(&new_policy, policy, sizeof(*policy));
2262 new_policy.min = policy->user_policy.min;
2263 new_policy.max = policy->user_policy.max;
2264 new_policy.policy = policy->user_policy.policy;
2265 new_policy.governor = policy->user_policy.governor;
2266
2267
2268
2269
2270
2271 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2272 new_policy.cur = cpufreq_driver->get(cpu);
2273 if (WARN_ON(!new_policy.cur)) {
2274 ret = -EIO;
2275 goto unlock;
2276 }
2277
2278 if (!policy->cur) {
2279 pr_debug("Driver did not initialize current freq\n");
2280 policy->cur = new_policy.cur;
2281 } else {
2282 if (policy->cur != new_policy.cur && has_target())
2283 cpufreq_out_of_sync(policy, new_policy.cur);
2284 }
2285 }
2286
2287 ret = cpufreq_set_policy(policy, &new_policy);
2288
2289unlock:
2290 up_write(&policy->rwsem);
2291
2292 cpufreq_cpu_put(policy);
2293 return ret;
2294}
2295EXPORT_SYMBOL(cpufreq_update_policy);
2296
2297static int cpufreq_cpu_callback(struct notifier_block *nfb,
2298 unsigned long action, void *hcpu)
2299{
2300 unsigned int cpu = (unsigned long)hcpu;
2301 struct device *dev;
2302
2303 dev = get_cpu_device(cpu);
2304 if (dev) {
2305 switch (action & ~CPU_TASKS_FROZEN) {
2306 case CPU_ONLINE:
2307 __cpufreq_add_dev(dev, NULL);
2308 break;
2309
2310 case CPU_DOWN_PREPARE:
2311 __cpufreq_remove_dev_prepare(dev, NULL);
2312 break;
2313
2314 case CPU_POST_DEAD:
2315 __cpufreq_remove_dev_finish(dev, NULL);
2316 break;
2317
2318 case CPU_DOWN_FAILED:
2319 __cpufreq_add_dev(dev, NULL);
2320 break;
2321 }
2322 }
2323 return NOTIFY_OK;
2324}
2325
2326static struct notifier_block __refdata cpufreq_cpu_notifier = {
2327 .notifier_call = cpufreq_cpu_callback,
2328};
2329
2330
2331
2332
2333static int cpufreq_boost_set_sw(int state)
2334{
2335 struct cpufreq_frequency_table *freq_table;
2336 struct cpufreq_policy *policy;
2337 int ret = -EINVAL;
2338
2339 for_each_policy(policy) {
2340 freq_table = cpufreq_frequency_get_table(policy->cpu);
2341 if (freq_table) {
2342 ret = cpufreq_frequency_table_cpuinfo(policy,
2343 freq_table);
2344 if (ret) {
2345 pr_err("%s: Policy frequency update failed\n",
2346 __func__);
2347 break;
2348 }
2349 policy->user_policy.max = policy->max;
2350 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2351 }
2352 }
2353
2354 return ret;
2355}
2356
2357int cpufreq_boost_trigger_state(int state)
2358{
2359 unsigned long flags;
2360 int ret = 0;
2361
2362 if (cpufreq_driver->boost_enabled == state)
2363 return 0;
2364
2365 write_lock_irqsave(&cpufreq_driver_lock, flags);
2366 cpufreq_driver->boost_enabled = state;
2367 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2368
2369 ret = cpufreq_driver->set_boost(state);
2370 if (ret) {
2371 write_lock_irqsave(&cpufreq_driver_lock, flags);
2372 cpufreq_driver->boost_enabled = !state;
2373 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2374
2375 pr_err("%s: Cannot %s BOOST\n",
2376 __func__, state ? "enable" : "disable");
2377 }
2378
2379 return ret;
2380}
2381
2382int cpufreq_boost_supported(void)
2383{
2384 if (likely(cpufreq_driver))
2385 return cpufreq_driver->boost_supported;
2386
2387 return 0;
2388}
2389EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2390
2391int cpufreq_boost_enabled(void)
2392{
2393 return cpufreq_driver->boost_enabled;
2394}
2395EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
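/*
 * cpufreq_register_driver - register a CPU frequency scaling driver
 *
 * Only one driver can be registered at a time; -EEXIST is returned if one
 * already is.  Unless the driver sets CPUFREQ_STICKY, registration is
 * undone when no CPU could be initialized for it.
 *
 * A minimal table-based driver would register roughly like this (sketch
 * only; the foo_* callbacks are hypothetical):
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.init		= foo_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	ret = cpufreq_register_driver(&foo_cpufreq_driver);
 */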
2411int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2412{
2413 unsigned long flags;
2414 int ret;
2415
2416 if (cpufreq_disabled())
2417 return -ENODEV;
2418
2419 if (!driver_data || !driver_data->verify || !driver_data->init ||
2420 !(driver_data->setpolicy || driver_data->target_index ||
2421 driver_data->target) ||
2422 (driver_data->setpolicy && (driver_data->target_index ||
2423 driver_data->target)) ||
2424 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2425 return -EINVAL;
2426
2427 pr_debug("trying to register driver %s\n", driver_data->name);
2428
2429 write_lock_irqsave(&cpufreq_driver_lock, flags);
2430 if (cpufreq_driver) {
2431 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2432 return -EEXIST;
2433 }
2434 cpufreq_driver = driver_data;
2435 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2436
2437 if (driver_data->setpolicy)
2438 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2439
2440 if (cpufreq_boost_supported()) {
2441
2442
2443
2444
2445 if (!cpufreq_driver->set_boost)
2446 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2447
2448 ret = cpufreq_sysfs_create_file(&boost.attr);
2449 if (ret) {
2450 pr_err("%s: cannot register global BOOST sysfs file\n",
2451 __func__);
2452 goto err_null_driver;
2453 }
2454 }
2455
2456 ret = subsys_interface_register(&cpufreq_interface);
2457 if (ret)
2458 goto err_boost_unreg;
2459
2460 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2461 list_empty(&cpufreq_policy_list)) {
2462
2463 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2464 driver_data->name);
2465 goto err_if_unreg;
2466 }
2467
2468 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2469 pr_debug("driver %s up and running\n", driver_data->name);
2470
2471 return 0;
2472err_if_unreg:
2473 subsys_interface_unregister(&cpufreq_interface);
2474err_boost_unreg:
2475 if (cpufreq_boost_supported())
2476 cpufreq_sysfs_remove_file(&boost.attr);
2477err_null_driver:
2478 write_lock_irqsave(&cpufreq_driver_lock, flags);
2479 cpufreq_driver = NULL;
2480 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2481 return ret;
2482}
2483EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2494{
2495 unsigned long flags;
2496
2497 if (!cpufreq_driver || (driver != cpufreq_driver))
2498 return -EINVAL;
2499
2500 pr_debug("unregistering driver %s\n", driver->name);
2501
2502 subsys_interface_unregister(&cpufreq_interface);
2503 if (cpufreq_boost_supported())
2504 cpufreq_sysfs_remove_file(&boost.attr);
2505
2506 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2507
2508 down_write(&cpufreq_rwsem);
2509 write_lock_irqsave(&cpufreq_driver_lock, flags);
2510
2511 cpufreq_driver = NULL;
2512
2513 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2514 up_write(&cpufreq_rwsem);
2515
2516 return 0;
2517}
2518EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2519
2520
2521
2522
2523
2524static struct syscore_ops cpufreq_syscore_ops = {
2525 .shutdown = cpufreq_suspend,
2526};
2527
2528static int __init cpufreq_core_init(void)
2529{
2530 if (cpufreq_disabled())
2531 return -ENODEV;
2532
2533 cpufreq_global_kobject = kobject_create();
2534 BUG_ON(!cpufreq_global_kobject);
2535
2536 register_syscore_ops(&cpufreq_syscore_ops);
2537
2538 return 0;
2539}
2540core_initcall(cpufreq_core_init);
2541