// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>

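/*
 * Per-CPU frequency scale factor: the CPU's current frequency as a
 * fraction of its maximum, in units of SCHED_CAPACITY_SCALE. Read back
 * by the scheduler through topology_get_freq_scale().
 */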
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;

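/*
 * Record the ratio of each CPU's current frequency to its maximum
 * frequency, scaled up by SCHED_CAPACITY_SCALE, so that scheduler load
 * tracking stays invariant across DVFS frequency changes.
 */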
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
			 unsigned long max_freq)
{
	unsigned long scale;
	int i;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;
}

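/*
 * Per-CPU compute capacity relative to the most capable CPU in the
 * system, which is rated at SCHED_CAPACITY_SCALE (1024).
 */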
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

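/* Show handler for the per-CPU cpu_capacity sysfs attribute. */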
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

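/*
 * Create the cpu_capacity sysfs attribute for every possible CPU.
 * Runs at subsys_initcall time, once the CPU devices have been
 * registered.
 */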
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

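/*
 * Raw capacities parsed from the capacity-dmips-mhz DT property, and the
 * largest value seen so far, used as the normalization divisor.
 */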
static u32 capacity_scale;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

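/*
 * Normalize the raw capacities so that the biggest CPU ends up with
 * SCHED_CAPACITY_SCALE and the others with a proportional fraction of it:
 * cpu_scale = raw_capacity << SCHED_CAPACITY_SHIFT / capacity_scale.
 */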
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

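/*
 * Parse the capacity-dmips-mhz property of @cpu_node into raw_capacity.
 * Parsing is all-or-nothing: if the property is missing on any CPU, all
 * CPUs fall back to the default capacity of SCHED_CAPACITY_SCALE.
 */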
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

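/*
 * cpufreq policy notifier: scale each CPU's DT capacity by its maximum
 * frequency as the per-policy limits become known. Once every possible
 * CPU has been visited, normalize the capacities, queue a sched_domain
 * rebuild and schedule the teardown of this notifier.
 */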
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_NOTIFY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
				    policy->cpuinfo.max_freq / 1000UL;
		capacity_scale = max(raw_capacity[cpu], capacity_scale);
	}

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * on ACPI based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

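/*
 * Run from a workqueue rather than directly from the notifier callback,
 * since a notifier cannot safely unregister itself from within its own
 * callback.
 */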
static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif