/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/include/linux/cpufreq.h
 *
 * Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 */
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>

/*********************************************************************
 *                        CPUFREQ INTERFACE                          *
 *********************************************************************/
/*
 * Frequency values here are CPU kHz
 *
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */

#define CPUFREQ_ETERNAL			(-1)
#define CPUFREQ_NAME_LEN		16

/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN		(CPUFREQ_NAME_LEN + 1)

struct cpufreq_governor;

enum cpufreq_table_sorting {
	CPUFREQ_TABLE_UNSORTED,
	CPUFREQ_TABLE_SORTED_ASCENDING,
	CPUFREQ_TABLE_SORTED_DESCENDING
};

struct cpufreq_cpuinfo {
	unsigned int		max_freq;
	unsigned int		min_freq;

	/* in 10^(-9) s = nanoseconds */
	unsigned int		transition_latency;
};

struct cpufreq_policy {
	/* CPUs sharing clock, require sw coordination */
	cpumask_var_t		cpus;	/* Online CPUs only */
	cpumask_var_t		related_cpus; /* Online + Offline CPUs */
	cpumask_var_t		real_cpus; /* Related and present */

	unsigned int		shared_type; /* ACPI: ANY or ALL affected CPUs
						should set cpufreq */
	unsigned int		cpu;    /* cpu managing this policy, must be online */

	struct clk		*clk;
	struct cpufreq_cpuinfo	cpuinfo; /* see above */

	unsigned int		min;    /* in kHz */
	unsigned int		max;    /* in kHz */
	unsigned int		cur;    /* in kHz, only needed if cpufreq
					 * governors are used */
	unsigned int		suspend_freq; /* freq to set during suspend */

	unsigned int		policy; /* see above */
	unsigned int		last_policy; /* policy before unplug */
	struct cpufreq_governor	*governor; /* see below */
	void			*governor_data;
	char			last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct	update; /* if update_policy() needs to be
					 * called, but you're in IRQ context */

	struct freq_constraints	constraints;
	struct freq_qos_request	*min_freq_req;
	struct freq_qos_request	*max_freq_req;

	struct cpufreq_frequency_table	*freq_table;
	enum cpufreq_table_sorting freq_table_sorted;

	struct list_head	policy_list;
	struct kobject		kobj;
	struct completion	kobj_unregister;

	/*
	 * The rules for this semaphore:
	 * - Any routine that wants to read from the policy structure will
	 *   do a down_read on this semaphore.
	 * - Any routine that will write to the policy structure and/or may
	 *   take away the policy altogether (eg. CPU hotplug), will hold
	 *   this lock in write mode before doing so.
	 */
	struct rw_semaphore	rwsem;

	/*
	 * Fast switch flags:
	 * - fast_switch_possible is set by the driver if it can guarantee
	 *   that the frequency can be changed on any CPU sharing the policy
	 *   and that the change will affect all of the policy CPUs.
	 * - fast_switch_enabled is set by governors that support fast
	 *   frequency switching with the help of
	 *   cpufreq_enable_fast_switch().
	 */
	bool			fast_switch_possible;
	bool			fast_switch_enabled;

	/*
	 * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
	 * governor.
	 */
	bool			strict_target;

	/*
	 * Preferred average time interval between consecutive invocations of
	 * the driver to set the frequency for this policy.  To be set by the
	 * scaling driver (0, which is the default, means no preference).
	 */
	unsigned int		transition_delay_us;

	/*
	 * Remote DVFS flag (Not added to the driver structure as we don't
	 * want to access another structure from scheduler hotpath).
	 * Should be set if CPUs can do DVFS on behalf of other CPUs from
	 * different cpufreq policies.
	 */
	bool			dvfs_possible_from_any_cpu;

	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
	unsigned int		cached_target_freq;
	unsigned int		cached_resolved_idx;

	/* Synchronization for frequency transitions */
	bool			transition_ongoing; /* Tracks transition status */
	spinlock_t		transition_lock;
	wait_queue_head_t	transition_wait;
	struct task_struct	*transition_task; /* Task which is doing the transition */

	/* cpufreq-stats */
	struct cpufreq_stats	*stats;

	/* For cpufreq driver's internal use */
	void			*driver_data;

	/* Pointer to the cooling device if used for thermal mitigation */
	struct thermal_cooling_device *cdev;

	struct notifier_block nb_min;
	struct notifier_block nb_max;
};

/*
 * Structure passed to the cpufreq driver's ->verify() callback: it carries
 * only the data the driver is supposed to inspect and adjust (the tentative
 * min/max limits, the CPU and the frequency table) before the limits are
 * applied to the policy proper.
 */
struct cpufreq_policy_data {
	struct cpufreq_cpuinfo		cpuinfo;
	struct cpufreq_frequency_table	*freq_table;
	unsigned int			cpu;
	unsigned int			min;
	unsigned int			max;
};

struct cpufreq_freqs {
	struct cpufreq_policy *policy;
	unsigned int old;
	unsigned int new;
	u8 flags;	/* flags of cpufreq_driver, see below. */
};

/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU */

#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	return NULL;
}
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
	return cpumask_weight(policy->cpus) > 1;
}

#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
void disable_cpufreq(void);

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);

struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
bool cpufreq_supports_freq_invariance(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	return 0;
}
static inline bool cpufreq_supports_freq_invariance(void)
{
	return false;
}
static inline void disable_cpufreq(void) { }
#endif

#ifdef CONFIG_CPU_FREQ_STAT
void cpufreq_stats_create_table(struct cpufreq_policy *policy);
void cpufreq_stats_free_table(struct cpufreq_policy *policy);
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq);
#else
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
						   unsigned int new_freq) { }
#endif

/*********************************************************************
 *                      CPUFREQ DRIVER INTERFACE                     *
 *********************************************************************/

#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2  /* closest frequency to target */

struct freq_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_policy *, char *);
	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};

#define cpufreq_freq_attr_ro(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
static struct freq_attr _name =			\
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0644, show_##_name, store_##_name)

#define cpufreq_freq_attr_wo(_name)		\
static struct freq_attr _name =			\
__ATTR(_name, 0200, NULL, store_##_name)

#define define_one_global_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_global_rw(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)
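
/*
 * Illustrative sketch, not part of this header: the *_attr macros above
 * expect show_<name>()/store_<name>() helpers with the struct freq_attr
 * prototypes to exist in the same translation unit.  A hypothetical,
 * driver-private attribute could be wired up roughly like this (the names
 * example_level, show_example_level() and store_example_level() are made up
 * for the example):
 *
 *	static ssize_t show_example_level(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 *
 *	static ssize_t store_example_level(struct cpufreq_policy *policy,
 *					   const char *buf, size_t count)
 *	{
 *		unsigned int val;
 *
 *		if (kstrtouint(buf, 10, &val))
 *			return -EINVAL;
 *		return count;
 *	}
 *
 *	cpufreq_freq_attr_rw(example_level);
 *
 * The resulting &example_level would then typically be listed in the
 * NULL-terminated ->attr array of struct cpufreq_driver below.
 */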

struct cpufreq_driver {
	char		name[CPUFREQ_NAME_LEN];
	u16		flags;
	void		*driver_data;

	/* needed by all drivers */
	int		(*init)(struct cpufreq_policy *policy);
	int		(*verify)(struct cpufreq_policy_data *policy);

	/* define one out of two */
	int		(*setpolicy)(struct cpufreq_policy *policy);

	int		(*target)(struct cpufreq_policy *policy,
				  unsigned int target_freq,
				  unsigned int relation);	/* Deprecated */
	int		(*target_index)(struct cpufreq_policy *policy,
					unsigned int index);
	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
				       unsigned int target_freq);
	/*
	 * ->fast_switch() replacement for drivers that use an internal
	 * representation of performance levels and can pass hints other than
	 * the target performance level to the hardware.
	 */
	void		(*adjust_perf)(unsigned int cpu,
				       unsigned long min_perf,
				       unsigned long target_perf,
				       unsigned long capacity);

	/*
	 * Caches and returns the lowest driver-supported frequency greater
	 * than or equal to the target frequency, subject to any driver
	 * limitations.  Does not set the frequency.  Only to be implemented
	 * for drivers with target().
	 */
	unsigned int	(*resolve_freq)(struct cpufreq_policy *policy,
					unsigned int target_freq);

	/*
	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
	 * unset.
	 *
	 * get_intermediate should return a stable intermediate frequency
	 * platform wants to switch to and target_intermediate() should set
	 * CPU to that frequency, before jumping to the frequency
	 * corresponding to 'index'.  Core will take care of sending
	 * notifications and driver doesn't have to handle them in
	 * target_intermediate() or target_index().
	 *
	 * Drivers can return '0' from get_intermediate() in case they don't
	 * wish to switch to intermediate frequency for some target frequency.
	 * In that case core will directly call ->target_index().
	 */
	unsigned int	(*get_intermediate)(struct cpufreq_policy *policy,
					    unsigned int index);
	int		(*target_intermediate)(struct cpufreq_policy *policy,
					       unsigned int index);

	/* should be defined, if possible */
	unsigned int	(*get)(unsigned int cpu);

	/* Called to update policy limits on firmware notifications. */
	void		(*update_limits)(unsigned int cpu);

	/* optional */
	int		(*bios_limit)(int cpu, unsigned int *limit);

	int		(*online)(struct cpufreq_policy *policy);
	int		(*offline)(struct cpufreq_policy *policy);
	int		(*exit)(struct cpufreq_policy *policy);
	void		(*stop_cpu)(struct cpufreq_policy *policy);
	int		(*suspend)(struct cpufreq_policy *policy);
	int		(*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void		(*ready)(struct cpufreq_policy *policy);

	struct freq_attr **attr;

	/* platform specific boost support code */
	bool		boost_enabled;
	int		(*set_boost)(struct cpufreq_policy *policy, int state);
};

/* flags */

/* driver isn't removed even if all ->init() calls failed */
#define CPUFREQ_STICKY				BIT(0)

/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS			BIT(1)

/* don't warn on suspend/resume speed mismatches */
#define CPUFREQ_PM_NO_WARN			BIT(2)

/*
 * Set by platforms with multiple clock domains, i.e. supporting multiple
 * policies.  With this, sysfs directories of the governor are created per
 * policy, so that each cluster can use the same governor with different
 * tunables.
 */
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY	BIT(3)

/*
 * Set by drivers that do POSTCHANGE notifications from outside of their
 * ->target() routine, so that the core can handle those notifications
 * specially.
 */
#define CPUFREQ_ASYNC_NOTIFICATION		BIT(4)

/*
 * Set by drivers that want the cpufreq core to check whether the CPU is
 * running at a frequency present in the driver's frequency table.  If an
 * out-of-table frequency is found, the core tries to switch to a table
 * frequency instead.
 */
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK	BIT(5)

/*
 * Set by drivers to disallow use of governors with "dynamic_switching" flag
 * set.
 */
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING	BIT(6)

/*
 * Set by drivers that want the core to automatically register the cpufreq
 * driver as a thermal cooling device.
 */
#define CPUFREQ_IS_COOLING_DEV			BIT(7)

/*
 * Set by drivers that need to update internal upper and lower boundaries
 * along with the target frequency, so the core and governors should also
 * invoke the driver if the target frequency does not change, but the policy
 * min or max may have changed.
 */
#define CPUFREQ_NEED_UPDATE_LIMITS		BIT(8)

int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);

bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);

static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
{
	return IS_ENABLED(CONFIG_CPU_THERMAL) &&
		(drv->flags & CPUFREQ_IS_COOLING_DEV);
}
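
/*
 * Illustrative sketch, not a real driver: a minimal table-based scaling
 * driver might be registered like this, assuming hypothetical foo_*()
 * callbacks and a foo_freq_table defined elsewhere by that driver:
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.init		= foo_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init foo_cpufreq_module_init(void)
 *	{
 *		return cpufreq_register_driver(&foo_cpufreq_driver);
 *	}
 *	module_init(foo_cpufreq_module_init);
 */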

static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
						unsigned int min,
						unsigned int max)
{
	if (policy->min < min)
		policy->min = min;
	if (policy->max < min)
		policy->max = min;
	if (policy->min > max)
		policy->min = max;
	if (policy->max > max)
		policy->max = max;
	if (policy->min > policy->max)
		policy->min = policy->max;
}

static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}
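
/*
 * Illustrative sketch: a driver's ->verify() callback commonly just clamps
 * the tentative limits to the hardware range recorded in ->cpuinfo, e.g. a
 * hypothetical foo_cpufreq_verify(); drivers with a frequency table would
 * normally use cpufreq_generic_frequency_table_verify() instead:
 *
 *	static int foo_cpufreq_verify(struct cpufreq_policy_data *policy)
 *	{
 *		cpufreq_verify_within_cpu_limits(policy);
 *		return 0;
 *	}
 */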

#ifdef CONFIG_CPU_FREQ
void cpufreq_suspend(void);
void cpufreq_resume(void);
int cpufreq_generic_suspend(struct cpufreq_policy *policy);
#else
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
#endif

/*********************************************************************
 *                     CPUFREQ NOTIFIER INTERFACE                    *
 *********************************************************************/

#define CPUFREQ_TRANSITION_NOTIFIER	(0)
#define CPUFREQ_POLICY_NOTIFIER		(1)

/* Transition notifiers */
#define CPUFREQ_PRECHANGE		(0)
#define CPUFREQ_POSTCHANGE		(1)

/* Policy notifiers */
#define CPUFREQ_CREATE_POLICY		(0)
#define CPUFREQ_REMOVE_POLICY		(1)

#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed);

#else
static inline int cpufreq_register_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
						unsigned int list)
{
	return 0;
}
#endif
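
/*
 * Illustrative sketch: a transition notifier receives a struct cpufreq_freqs
 * pointer as its data argument and is called with CPUFREQ_PRECHANGE before
 * and CPUFREQ_POSTCHANGE after a frequency change.  A hypothetical listener
 * could look like this (foo_transition_cb() and foo_transition_nb are made
 * up for the example):
 *
 *	static int foo_transition_cb(struct notifier_block *nb,
 *				     unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u kHz -> %u kHz\n",
 *				 freqs->policy->cpu, freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_transition_nb = {
 *		.notifier_call = foo_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&foo_transition_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */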

/**
 * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
 * safe)
 * @old:  old value
 * @div:  divisor
 * @mult: multiplier
 *
 * Returns new = old * mult / div
 */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
					  u_int mult)
{
#if BITS_PER_LONG == 32
	u64 result = ((u64) old) * ((u64) mult);
	do_div(result, div);
	return (unsigned long) result;

#elif BITS_PER_LONG == 64
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
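
/*
 * Illustrative sketch: cpufreq_scale() is typically used to rescale a value
 * that is proportional to the CPU frequency across a transition, e.g. a
 * loops_per_jiffy style calibration value in a POSTCHANGE notifier:
 *
 *	new_lpj = cpufreq_scale(old_lpj, freqs->old, freqs->new);
 *
 * i.e. new_lpj = old_lpj * freqs->new / freqs->old, computed without
 * overflowing on 32-bit architectures.
 */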

/*********************************************************************
 *                          CPUFREQ GOVERNORS                        *
 *********************************************************************/

#define CPUFREQ_POLICY_UNKNOWN		(0)
/*
 * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used.  If (cpufreq_driver->setpolicy) exists, these
 * two generic policies are available:
 */
#define CPUFREQ_POLICY_POWERSAVE	(1)
#define CPUFREQ_POLICY_PERFORMANCE	(2)

/*
 * The polling frequency depends on the capability of the processor.  Default
 * polling frequency is 1000 times the transition latency of the processor.
 * The ondemand governor will work on any processor with transition latency
 * <= 10ms, using an appropriate sampling rate.
 */
#define LATENCY_MULTIPLIER		(1000)

struct cpufreq_governor {
	char	name[CPUFREQ_NAME_LEN];
	int	(*init)(struct cpufreq_policy *policy);
	void	(*exit)(struct cpufreq_policy *policy);
	int	(*start)(struct cpufreq_policy *policy);
	void	(*stop)(struct cpufreq_policy *policy);
	void	(*limits)(struct cpufreq_policy *policy);
	ssize_t	(*show_setspeed)(struct cpufreq_policy *policy,
				 char *buf);
	int	(*store_setspeed)(struct cpufreq_policy *policy,
				  unsigned int freq);
	struct list_head	governor_list;
	struct module		*owner;
	u8			flags;
};

/* Governor flags */

/* For governors which change frequency dynamically by themselves */
#define CPUFREQ_GOV_DYNAMIC_SWITCHING	BIT(0)

/* For governors wanting the target frequency to be set exactly */
#define CPUFREQ_GOV_STRICT_TARGET	BIT(1)

unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq);
void cpufreq_driver_adjust_perf(unsigned int cpu,
				unsigned long min_perf,
				unsigned long target_perf,
				unsigned long capacity);
bool cpufreq_driver_has_adjust_perf(void);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation);
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
int cpufreq_start_governor(struct cpufreq_policy *policy);
void cpufreq_stop_governor(struct cpufreq_policy *policy);

#define cpufreq_governor_init(__governor)			\
static int __init __governor##_init(void)			\
{								\
	return cpufreq_register_governor(&__governor);		\
}								\
core_initcall(__governor##_init)

#define cpufreq_governor_exit(__governor)			\
static void __exit __governor##_exit(void)			\
{								\
	return cpufreq_unregister_governor(&__governor);	\
}								\
module_exit(__governor##_exit)
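
/*
 * Illustrative sketch: a governor module is usually declared as a static
 * struct cpufreq_governor and registered with the helpers above (the
 * "example" governor and example_gov_*() callbacks are hypothetical):
 *
 *	static struct cpufreq_governor cpufreq_gov_example = {
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.start	= example_gov_start,
 *		.stop	= example_gov_stop,
 *		.limits	= example_gov_limits,
 *	};
 *
 *	cpufreq_governor_init(cpufreq_gov_example);
 *	cpufreq_governor_exit(cpufreq_gov_example);
 */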

struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);

static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}

/* Governor attribute set */
struct gov_attr_set {
	struct kobject kobj;
	struct list_head policy_list;
	struct mutex update_lock;
	int usage_count;
};

/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;

void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);

/* Governor sysfs attribute */
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
			 size_t count);
};
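
/*
 * Illustrative sketch: governors that share tunables between policies embed
 * a gov_attr_set in their tunables object and expose struct governor_attr
 * entries through governor_sysfs_ops.  Assuming a hypothetical struct
 * example_tunables, a read-only attribute could look like this:
 *
 *	struct example_tunables {
 *		struct gov_attr_set	attr_set;
 *		unsigned int		rate_limit_us;
 *	};
 *
 *	static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
 *	{
 *		struct example_tunables *tunables =
 *			container_of(attr_set, struct example_tunables, attr_set);
 *
 *		return sprintf(buf, "%u\n", tunables->rate_limit_us);
 *	}
 *
 *	static struct governor_attr rate_limit_us = __ATTR_RO(rate_limit_us);
 */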

/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

/* Special values of .frequency field */
#define CPUFREQ_ENTRY_INVALID	~0u
#define CPUFREQ_TABLE_END	~1u
/* Special values of .flags field */
#define CPUFREQ_BOOST_FREQ	(1 << 0)

struct cpufreq_frequency_table {
	unsigned int	flags;
	unsigned int	driver_data; /* driver specific data, not used by core */
	unsigned int	frequency;   /* kHz - doesn't need to be in ascending
				      * order */
};
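
/*
 * Illustrative sketch: a driver-provided table is a simple array terminated
 * by a CPUFREQ_TABLE_END entry; entries can be kept in place but disabled
 * with CPUFREQ_ENTRY_INVALID, and boost-only entries are marked with
 * CPUFREQ_BOOST_FREQ.  The foo_freq_table name is made up for the example:
 *
 *	static struct cpufreq_frequency_table foo_freq_table[] = {
 *		{ .frequency = 400000 },
 *		{ .frequency = 800000 },
 *		{ .frequency = CPUFREQ_ENTRY_INVALID },
 *		{ .flags = CPUFREQ_BOOST_FREQ, .frequency = 1200000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 */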

#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table);
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table);
#else
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
						struct cpufreq_frequency_table
						**table)
{
	return -EINVAL;
}

static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
						 struct cpufreq_frequency_table
						 **table)
{
}
#endif

/*
 * cpufreq_for_each_entry -	iterate over a cpufreq_frequency_table
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_entry(pos, table)	\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

/*
 * cpufreq_for_each_entry_idx -	iterate over a cpufreq_frequency_table
 *	with index
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 * @idx:	the table entry currently being processed.
 */

#define cpufreq_for_each_entry_idx(pos, table, idx)	\
	for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
		pos++, idx++)

/*
 * cpufreq_for_each_valid_entry -	iterate over a cpufreq_frequency_table
 *	excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_valid_entry(pos, table)			\
	for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)	\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else

/*
 * cpufreq_for_each_valid_entry_idx -	iterate with index over a
 *	cpufreq_frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos:	the cpufreq_frequency_table * to use as a loop cursor.
 * @table:	the cpufreq_frequency_table * to iterate over.
 * @idx:	the table entry currently being processed.
 */

#define cpufreq_for_each_valid_entry_idx(pos, table, idx)		\
	cpufreq_for_each_entry_idx(pos, table, idx)			\
		if (pos->frequency == CPUFREQ_ENTRY_INVALID)		\
			continue;					\
		else
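
/*
 * Illustrative sketch: finding the highest valid frequency and its index in
 * a policy's table with the iterators above (no sorting assumed):
 *
 *	struct cpufreq_frequency_table *pos;
 *	unsigned int max_freq = 0;
 *	int idx, max_idx = -1;
 *
 *	cpufreq_for_each_valid_entry_idx(pos, policy->freq_table, idx) {
 *		if (pos->frequency > max_freq) {
 *			max_freq = pos->frequency;
 *			max_idx = idx;
 *		}
 *	}
 */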

int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table);

int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
				   struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);

int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
				      unsigned int freq);

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);

#ifdef CONFIG_CPU_FREQ
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);

/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq >= target_freq)
			return idx;

		best = idx;
	}

	return best;
}

/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		return best;
	}

	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_al(policy, target_freq);
	else
		return cpufreq_table_find_index_dl(policy, target_freq);
}

/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		return best;
	}

	return best;
}

/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq <= target_freq)
			return idx;

		best = idx;
	}

	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_ah(policy, target_freq);
	else
		return cpufreq_table_find_index_dh(policy, target_freq);
}

/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq < target_freq) {
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (target_freq - table[best].frequency > freq - target_freq)
			return idx;

		return best;
	}

	return best;
}

/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		if (freq > target_freq) {
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		/* Choose the closest freq */
		if (table[best].frequency - target_freq > target_freq - freq)
			return idx;

		return best;
	}

	return best;
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_index_ac(policy, target_freq);
	else
		return cpufreq_table_find_index_dc(policy, target_freq);
}

static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
						 unsigned int target_freq,
						 unsigned int relation)
{
	if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
		return cpufreq_table_index_unsorted(policy, target_freq,
						    relation);

	switch (relation) {
	case CPUFREQ_RELATION_L:
		return cpufreq_table_find_index_l(policy, target_freq);
	case CPUFREQ_RELATION_H:
		return cpufreq_table_find_index_h(policy, target_freq);
	case CPUFREQ_RELATION_C:
		return cpufreq_table_find_index_c(policy, target_freq);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
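
/*
 * Illustrative sketch: drivers that implement the older ->target() hook (or
 * core code acting on their behalf) can map a requested frequency and
 * relation to a table index and then program the hardware, e.g. with a
 * hypothetical foo_set_opp() helper:
 *
 *	static int foo_cpufreq_target(struct cpufreq_policy *policy,
 *				      unsigned int target_freq,
 *				      unsigned int relation)
 *	{
 *		int idx = cpufreq_frequency_table_target(policy, target_freq,
 *							 relation);
 *
 *		return foo_set_opp(policy, policy->freq_table[idx].frequency);
 *	}
 */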

static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *pos;
	int count = 0;

	if (unlikely(!policy->freq_table))
		return 0;

	cpufreq_for_each_valid_entry(pos, policy->freq_table)
		count++;

	return count;
}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
	return 0;
}
static inline int cpufreq_boost_enabled(void)
{
	return 0;
}

static inline int cpufreq_enable_boost_support(void)
{
	return -EINVAL;
}

static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	return false;
}
#endif

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov);
#else
static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
						 struct cpufreq_governor *old_gov) { }
#endif

extern void arch_freq_prepare_all(void);
extern unsigned int arch_freq_get_on_cpu(int cpu);

#ifndef arch_set_freq_scale
static __always_inline
void arch_set_freq_scale(const struct cpumask *cpus,
			 unsigned long cur_freq,
			 unsigned long max_freq)
{
}
#endif

/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);

unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency);
#endif /* _LINUX_CPUFREQ_H */