/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpu_pm.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
#include <asm/suspend.h>

/*
 * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
 * For a comprehensive description of the main algorithm used here, please
 * see Documentation/arm/cluster-pm-race-avoidance.txt.
 */

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTRL.C bit) is expected to still be active.
 */
static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTRL.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}

static int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

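/*
 * Set the address that the given CPU will jump to once it enters the
 * low-level MCPM entry code (mcpm_entry_point).  A NULL ptr clears the
 * vector.  The new value is cleaned to main memory so that a CPU whose
 * cache is still off can observe it.
 */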
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

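/*
 * Register an optional "early poke" for a CPU: a single (physical
 * address, value) pair that the low-level entry code writes out very
 * early, before any synchronisation, when that CPU enters
 * mcpm_entry_point.
 */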
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	poke[0] = poke_phys_addr;
	poke[1] = poke_val;
	__sync_cache_range_w(poke, 2 * sizeof(*poke));
}

static const struct mcpm_platform_ops *platform_ops;

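/*
 * Register the platform-specific backend implementing the actual power
 * up/down methods.  Only one backend may ever be registered; later calls
 * fail with -EBUSY.
 */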
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}

bool mcpm_is_available(void)
{
	return (platform_ops) ? true : false;
}

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

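/*
 * Per-CPU usage counts, protected by mcpm_lock.  A cluster is considered
 * unused, and may therefore be torn down, only when the counts of all
 * its CPUs are zero.
 */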
static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

static inline bool mcpm_cluster_unused(unsigned int cluster)
{
	int i, cnt;
	for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
		cnt |= mcpm_cpu_use_count[cluster][i];
	return !cnt;
}

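/*
 * Power up the given CPU, first powering up its cluster if no CPU in it
 * is currently in use.  Must be called from a sleepable context.
 */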
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	bool cpu_is_down, cluster_is_down;
	int ret = 0;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (!platform_ops)
		return -EUNATCH;
	might_sleep();

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&mcpm_lock);

	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
	cluster_is_down = mcpm_cluster_unused(cluster);

	mcpm_cpu_use_count[cluster][cpu]++;
	/*
	 * The only possible values are:
	 * 0 = CPU down
	 * 1 = CPU (still) up
	 * 2 = CPU requested to be up before it had a chance
	 *     to actually make itself down.
	 * Any other value is a bug.
	 */
	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
	       mcpm_cpu_use_count[cluster][cpu] != 2);

	if (cluster_is_down)
		ret = platform_ops->cluster_powerup(cluster);
	if (cpu_is_down && !ret)
		ret = platform_ops->cpu_powerup(cpu, cluster);

	arch_spin_unlock(&mcpm_lock);
	local_irq_enable();
	return ret;
}

typedef void (*phys_reset_t)(unsigned long);

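/*
 * Power down the calling CPU, tearing the whole cluster down if this is
 * the last CPU still up.  Must be called with IRQs disabled.  This
 * function normally does not return: the CPU either powers off in the
 * WFI, or, if a concurrent power-up request removed the reset condition,
 * soft-restarts through mcpm_entry_point as if it had just been released
 * from reset.
 */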
void mcpm_cpu_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_going_down, last_man;
	phys_reset_t phys_reset;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (WARN_ON_ONCE(!platform_ops))
		return;
	BUG_ON(!irqs_disabled());

	setup_mm_for_reboot();

	__mcpm_cpu_going_down(cpu, cluster);
	arch_spin_lock(&mcpm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

	mcpm_cpu_use_count[cluster][cpu]--;
	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
	       mcpm_cpu_use_count[cluster][cpu] != 1);
	cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
	last_man = mcpm_cluster_unused(cluster);

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		platform_ops->cpu_powerdown_prepare(cpu, cluster);
		platform_ops->cluster_powerdown_prepare(cluster);
		arch_spin_unlock(&mcpm_lock);
		platform_ops->cluster_cache_disable();
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		if (cpu_going_down)
			platform_ops->cpu_powerdown_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);

		/*
		 * If cpu_going_down is false here, that means a power_up
		 * request raced ahead of us.  Even if we do not want to
		 * shut this CPU down, the caller still expects execution
		 * to return through the system resume entry path, like
		 * when the WFI is aborted due to a new IRQ or the like..
		 * So let's continue with cache cleaning in all cases.
		 */
		platform_ops->cpu_cache_disable();
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (cpu_going_down)
		wfi();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * CPU might not be able to actually enter a powered down state
	 * with the WFI instruction if the power_up request has removed
	 * the required reset condition.  We must perform a re-entry in
	 * the kernel as if the power_up method just had deasserted reset
	 * on the CPU.
	 */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));

	/* should never get here */
	BUG();
}

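/*
 * Wait until the given CPU is actually safely powered down, by deferring
 * to the platform backend's wait_for_powerdown method.  Returns 0 on
 * success or -EUNATCH when no suitable backend is registered.
 */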
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
	int ret;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
		return -EUNATCH;

	ret = platform_ops->wait_for_powerdown(cpu, cluster);
	if (ret)
		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
			__func__, cpu, cluster, ret);

	return ret;
}

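/*
 * Suspend the calling CPU: give the backend a chance to set up any
 * special resume conditions, then go through the regular power-down
 * path.  Like mcpm_cpu_power_down(), this does not return normally.
 */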
void mcpm_cpu_suspend(void)
{
	if (WARN_ON_ONCE(!platform_ops))
		return;

	/* Some platforms might have to enable special resume modes, etc. */
	if (platform_ops->cpu_suspend_prepare) {
		unsigned int mpidr = read_cpuid_mpidr();
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		arch_spin_lock(&mcpm_lock);
		platform_ops->cpu_suspend_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
	}
	mcpm_cpu_power_down();
}

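/*
 * Called by a CPU once it is back up in the kernel: fix up the use
 * counts and give the backend the opportunity to finalise the power-up
 * of this CPU, and of the whole cluster if this was its first CPU up.
 */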
int mcpm_cpu_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_was_down, first_man;
	unsigned long flags;

	if (!platform_ops)
		return -EUNATCH;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	local_irq_save(flags);
	arch_spin_lock(&mcpm_lock);

	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
	first_man = mcpm_cluster_unused(cluster);

	if (first_man && platform_ops->cluster_is_up)
		platform_ops->cluster_is_up(cluster);
	if (cpu_was_down)
		mcpm_cpu_use_count[cluster][cpu] = 1;
	if (platform_ops->cpu_is_up)
		platform_ops->cpu_is_up(cpu, cluster);

	arch_spin_unlock(&mcpm_lock);
	local_irq_restore(flags);

	return 0;
}

#ifdef CONFIG_ARM_CPU_SUSPEND

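/*
 * Trampoline run by cpu_suspend() in mcpm_loopback() below: it takes
 * this CPU down through the regular MCPM state machine, disables the
 * cache via the supplied callback, then soft-restarts through
 * mcpm_entry_point.  The entry vector set here sends the CPU straight
 * into cpu_resume, so this function never returns directly.
 */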
static int __init nocache_trampoline(unsigned long _arg)
{
	void (*cache_disable)(void) = (void *)_arg;
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	phys_reset_t phys_reset;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	setup_mm_for_reboot();

	__mcpm_cpu_going_down(cpu, cluster);
	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
	cache_disable();
	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	__mcpm_cpu_down(cpu, cluster);

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}

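/*
 * Cycle the calling CPU down and back up through the low-level MCPM
 * machinery, so that the cache_disable callback and the power_up_setup
 * code are exercised once even on a CPU that was brought up by the
 * firmware rather than through MCPM.
 */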
int __init mcpm_loopback(void (*cache_disable)(void))
{
	int ret;

	/*
	 * The CPU is about to be soft-restarted through the MCPM entry
	 * code, so treat this exactly like a suspend/resume cycle: IRQs
	 * and FIQs must be off, and the CPU_PM notifiers must be run.
	 */
	local_irq_disable();
	local_fiq_disable();
	ret = cpu_pm_enter();
	if (!ret) {
		ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
		cpu_pm_exit();
	}
	local_fiq_enable();
	local_irq_enable();
	if (ret)
		pr_err("%s returned %d\n", __func__, ret);
	return ret;
}

#endif

extern unsigned long mcpm_power_up_setup_phys;

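/*
 * Initialise the MCPM synchronisation state: all clusters and CPUs start
 * as down except those currently online in the calling cluster, and the
 * physical address of the optional power_up_setup routine is recorded
 * for the low-level entry code to invoke at each affinity level.
 */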
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i) {
		mcpm_cpu_use_count[this_cluster][i] = 1;
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	}
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}