// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arm's RTSM implementation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/*
	 * This would be the place to derive the topology from the MPIDR
	 * affinity fields. However, MPIDR values cannot be trusted to
	 * describe the actual topology, so fall back to a flat default
	 * instead: no SMT, one core per CPU, and a package per NUMA node.
	 */
	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * If the PPTT doesn't have thread information, assume a homogeneous
	 * machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#endif

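/*
 * AMU (Activity Monitors Unit) based frequency invariance support.
 *
 * When the AMU extension is present, the constant-rate reference counter and
 * the core cycle counter are sampled on every scheduler tick to derive the
 * frequency scale factor used by the scheduler's frequency invariance engine.
 */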
#ifdef CONFIG_ARM64_AMU_EXTN
#define read_corecnt()	read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
#define read_constcnt()	read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
#else
#define read_corecnt()	(0UL)
#define read_constcnt()	(0UL)
#endif

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt

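/*
 * Per-CPU state for frequency invariance: the precomputed maximum-frequency
 * ratio and the counter values snapshotted on the previous tick.
 */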
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;

void update_freq_counters_refs(void)
{
	this_cpu_write(arch_core_cycles_prev, read_corecnt());
	this_cpu_write(arch_const_cycles_prev, read_constcnt());
}

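/*
 * A CPU can only be used for AMU-based frequency invariance if it is present,
 * implements the AMU extension and has already snapshotted non-zero counter
 * values, i.e. its counters are actually enabled and counting.
 */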
static inline bool freq_counters_valid(int cpu)
{
	if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
		return false;

	if (!cpu_has_amu_feat(cpu)) {
		pr_debug("CPU%d: counters are not supported.\n", cpu);
		return false;
	}

	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
		     !per_cpu(arch_core_cycles_prev, cpu))) {
		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
		return false;
	}

	return true;
}

static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
{
	u64 ratio;

	if (unlikely(!max_rate || !ref_rate)) {
		pr_debug("CPU%d: invalid maximum or reference frequency.\n",
			 cpu);
		return -EINVAL;
	}

	/*
	 * Pre-compute the fixed ratio between the frequency of the constant
	 * reference counter and the maximum frequency of the CPU.
	 *
	 *			    ref_rate
	 * arch_max_freq_scale =   ---------- * SCHED_CAPACITY_SCALE^2
	 *			    max_rate
	 *
	 * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE^2
	 * in order to ensure a good resolution for arch_max_freq_scale for
	 * very low reference frequencies (down to the KHz range which should
	 * be unlikely).
	 */
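	/*
	 * Illustrative example (hypothetical numbers): with a 25 MHz arch
	 * timer as the reference and a 2.5 GHz maximum CPU frequency,
	 * (25000000 << 20) / 2500000000 = 10485, i.e. roughly 1% of
	 * SCHED_CAPACITY_SCALE^2, matching the 1:100 frequency ratio.
	 */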
	ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT);
	ratio = div64_u64(ratio, max_rate);
	if (!ratio) {
		WARN_ONCE(1, "Reference frequency too low.\n");
		return -EINVAL;
	}

	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

	return 0;
}

static void amu_scale_freq_tick(void)
{
	u64 prev_core_cnt, prev_const_cnt;
	u64 core_cnt, const_cnt, scale;

	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

	update_freq_counters_refs();

	const_cnt = this_cpu_read(arch_const_cycles_prev);
	core_cnt = this_cpu_read(arch_core_cycles_prev);

	if (unlikely(core_cnt <= prev_core_cnt ||
		     const_cnt <= prev_const_cnt))
		return;

	/*
	 *	    core_cnt delta	 arch_max_freq_scale
	 * scale =  ---------------  *	--------------------
	 *	    const_cnt delta	SCHED_CAPACITY_SCALE
	 *
	 * See freq_inv_set_max_ratio() for details on arch_max_freq_scale
	 * and the use of SCHED_CAPACITY_SHIFT.
	 */
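	/*
	 * Continuing the illustrative example from freq_inv_set_max_ratio():
	 * if the core ran at half of its 2.5 GHz maximum since the last tick,
	 * the core/const delta ratio is ~50 and scale works out to roughly
	 * SCHED_CAPACITY_SCALE / 2 (i.e. ~512).
	 */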
	scale = core_cnt - prev_core_cnt;
	scale *= this_cpu_read(arch_max_freq_scale);
	scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
			  const_cnt - prev_const_cnt);

	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
	this_cpu_write(arch_freq_scale, (unsigned long)scale);
}

static struct scale_freq_data amu_sfd = {
	.source = SCALE_FREQ_SOURCE_ARCH,
	.set_freq_scale = amu_scale_freq_tick,
};

static void amu_fie_setup(const struct cpumask *cpus)
{
	int cpu;

	/* We are already set since the last insmod of cpufreq driver */
	if (unlikely(cpumask_subset(cpus, amu_fie_cpus)))
		return;

	for_each_cpu(cpu, cpus) {
		if (!freq_counters_valid(cpu) ||
		    freq_inv_set_max_ratio(cpu,
					   cpufreq_get_hw_max_freq(cpu) * 1000ULL,
					   arch_timer_get_rate()))
			return;
	}

	cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);

	topology_set_scale_freq_source(&amu_sfd, amu_fie_cpus);

	pr_debug("CPUs[%*pbl]: counters will be used for FIE.",
		 cpumask_pr_args(cpus));
}

static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_CREATE_POLICY)
		amu_fie_setup(policy->related_cpus);

	/*
	 * We don't need to handle CPUFREQ_REMOVE_POLICY event as the AMU
	 * counters don't have any dependency on the cpufreq driver once we
	 * have initialized AMU support and enabled invariance. The AMU
	 * counters will keep on working just fine in the absence of the
	 * cpufreq driver, and for the CPUs for which there are no counters
	 * available, the last set value of arch_freq_scale will remain valid
	 * as that is the frequency those CPUs are running at.
	 */

	return 0;
}

static struct notifier_block init_amu_fie_notifier = {
	.notifier_call = init_amu_fie_callback,
};

static int __init init_amu_fie(void)
{
	int ret;

	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
		return -ENOMEM;

	ret = cpufreq_register_notifier(&init_amu_fie_notifier,
					CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		free_cpumask_var(amu_fie_cpus);

	return ret;
}
core_initcall(init_amu_fie);

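/*
 * ACPI CPPC FFH (Functional Fixed Hardware) backend: expose the AMU core and
 * constant counters so that CPPC can read them, typically as the delivered
 * and reference performance feedback counters.
 */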
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

static void cpu_read_corecnt(void *val)
{
	*(u64 *)val = read_corecnt();
}

static void cpu_read_constcnt(void *val)
{
	*(u64 *)val = read_constcnt();
}

static inline
int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
{
	/*
	 * Abort call on counterless CPU or when interrupts are
	 * disabled - can lead to deadlock in smp sync call.
	 */
	if (!cpu_has_amu_feat(cpu))
		return -EOPNOTSUPP;

	if (WARN_ON_ONCE(irqs_disabled()))
		return -EPERM;

	smp_call_function_single(cpu, func, val, 1);

	return 0;
}

/*
 * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
 * below.
 */
bool cpc_ffh_supported(void)
{
	return freq_counters_valid(get_cpu_with_amu_feat());
}

int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
{
	int ret = -EOPNOTSUPP;

	switch ((u64)reg->address) {
	case 0x0:	/* core cycle counter */
		ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val);
		break;
	case 0x1:	/* constant reference counter */
		ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val);
		break;
	}

	if (!ret) {
		*val &= GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				    reg->bit_offset);
		*val >>= reg->bit_offset;
	}

	return ret;
}

/* Writing the AMU counters through FFH is not supported. */
int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -EOPNOTSUPP;
}
#endif