/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2016 ARM Limited
 *
 * Author: Kevin Brodsky <kevin.brodsky@arm.com>
 */
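
/* All pr_*() messages are prefixed with "psci_checker: " (KBUILD_MODNAME). */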
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
	COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
	COMPLETION_INITIALIZER(suspend_threads_done);
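
/*
 * Two test suites are run in sequence from the initcall: hotplug_tests(),
 * which cycles CPUs (individually and by topology core group) through
 * offline/online transitions, and suspend_tests(), which drives every
 * PSCI-backed cpuidle state from a high-priority thread on each CPU.
 */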

/*
 * Check that the PSCI operations needed by the tests (CPU_OFF, CPU_ON and
 * CPU_SUSPEND) are implemented and, if a UP Trusted OS is present, find
 * the CPU it resides on, since that CPU cannot be powered down.
 */
static int psci_ops_check(void)
{
	int migrate_type = -1;
	int cpu;

	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
		pr_warn("Missing PSCI operations, aborting tests\n");
		return -EOPNOTSUPP;
	}

	if (psci_ops.migrate_info_type)
		migrate_type = psci_ops.migrate_info_type();

	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
		/* There is a UP Trusted OS, find on which core it resides. */
		for_each_online_cpu(cpu)
			if (psci_tos_resident_on(cpu)) {
				tos_resident_cpu = cpu;
				break;
			}
		if (tos_resident_cpu == -1)
			pr_warn("UP Trusted OS resides on no online CPU\n");
	}

	return 0;
}

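/*
 * Partition @cpus into topology core groups: each entry of @cpu_groups is
 * the topology_core_cpumask() of one group. Returns the number of groups
 * found, or -ENOMEM if the scratch cpumask cannot be allocated.
 */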
static int find_cpu_groups(const struct cpumask *cpus,
			   const struct cpumask **cpu_groups)
{
	unsigned int nb = 0;
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(tmp, cpus);

	while (!cpumask_empty(tmp)) {
		const struct cpumask *cpu_group =
			topology_core_cpumask(cpumask_any(tmp));

		cpu_groups[nb++] = cpu_group;
		cpumask_andnot(tmp, tmp, cpu_group);
	}

	free_cpumask_var(tmp);
	return nb;
}

/*
 * offlined_cpus is a temporary cpumask owned by the caller; passing it in
 * avoids reallocating it on every call.
 */
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
				     struct cpumask *offlined_cpus)
{
	int cpu;
	int err = 0;

	cpumask_clear(offlined_cpus);

	/* Try to power down all CPUs in the mask. */
	for_each_cpu(cpu, cpus) {
		int ret = cpu_down(cpu);

		/*
		 * cpu_down() checks the number of online CPUs before the TOS
		 * resident CPU.
		 */
		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
			if (ret != -EBUSY) {
				pr_err("Unexpected return code %d while trying "
				       "to power down last online CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (cpu == tos_resident_cpu) {
			if (ret != -EPERM) {
				pr_err("Unexpected return code %d while trying "
				       "to power down TOS resident CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power down CPU %d\n", ret, cpu);
			++err;
		}

		if (ret == 0)
			cpumask_set_cpu(cpu, offlined_cpus);
	}

	/* Try to power up all the CPUs that have been offlined. */
	for_each_cpu(cpu, offlined_cpus) {
		int ret = cpu_up(cpu);

		if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power up CPU %d\n", ret, cpu);
			++err;
		} else {
			cpumask_clear_cpu(cpu, offlined_cpus);
		}
	}

	/*
	 * Something went bad at some point and some CPUs could not be turned
	 * back on.
	 */
	WARN_ON(!cpumask_empty(offlined_cpus) ||
		num_online_cpus() != nb_available_cpus);

	return err;
}

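/*
 * Exercise CPU hotplug twice: first over all online CPUs at once, then one
 * topology core group at a time. Returns the number of unexpected hotplug
 * results, or -ENOMEM if a buffer allocation fails.
 */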
static int hotplug_tests(void)
{
	int err;
	cpumask_var_t offlined_cpus;
	int i, nb_cpu_group;
	const struct cpumask **cpu_groups;
	char *page_buf;

	err = -ENOMEM;
	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
		return err;
	/* We may have up to nb_available_cpus cpu_groups. */
	cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
				   GFP_KERNEL);
	if (!cpu_groups)
		goto out_free_cpus;
	page_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!page_buf)
		goto out_free_cpu_groups;

	err = 0;
	nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);

	/*
	 * Of course the last CPU cannot be powered down and cpu_down() should
	 * refuse doing that.
	 */
	pr_info("Trying to turn off and on again all CPUs\n");
	err += down_and_up_cpus(cpu_online_mask, offlined_cpus);

	/*
	 * Take down CPUs by cpu group this time. When the last CPU is turned
	 * off, the cpu group itself should shut down.
	 */
	for (i = 0; i < nb_cpu_group; ++i) {
		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
						      cpu_groups[i]);
		/* Remove trailing newline. */
		page_buf[len - 1] = '\0';
		pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
			i, page_buf);
		err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
	}

	free_page((unsigned long)page_buf);
out_free_cpu_groups:
	kfree(cpu_groups);
out_free_cpus:
	free_cpumask_var(offlined_cpus);
	return err;
}

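/* The timer never needs to do anything; it only serves as a wakeup source. */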
static void dummy_callback(struct timer_list *unused) {}

/*
 * Enter the idle state @index on the current CPU, switching to the tick
 * broadcast timer first if the state stops the local timer.
 */
static int suspend_cpu(int index, bool broadcast)
{
	int ret;

	arch_cpu_idle_enter();

	if (broadcast) {
		/*
		 * The local timer will be shut down, we need a
		 * broadcast timer to wake us up.
		 */
		ret = tick_broadcast_enter();
		if (ret) {
			/*
			 * In the absence of hardware broadcast mechanism,
			 * this CPU might be used to broadcast wakeups, which
			 * may be why entering tick broadcast has failed.
			 * There is little the kernel can do to work around
			 * that, so enter WFI instead (idle state 0).
			 */
			cpu_do_idle();
			ret = 0;
			goto out_arch_exit;
		}
	}

	/*
	 * Replicate the common ARM cpuidle enter function
	 * (arm_enter_idle_state).
	 */
	ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);

	if (broadcast)
		tick_broadcast_exit();

out_arch_exit:
	arch_cpu_idle_exit();

	return ret;
}

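/*
 * Per-CPU test body: for each cycle, request every idle state other than
 * state 0 (usually plain WFI, which does not go through PSCI) and record
 * whether the requested state was reached (ret == index), a shallower
 * state was entered instead (ret >= 0), or the attempt failed (ret < 0).
 */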
static int suspend_test_thread(void *arg)
{
	int cpu = (long)arg;
	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
	struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv;
	/* No need for a proper callback, we just want to wake up the CPU. */
	struct timer_list wakeup_timer;

	/* Wait for the main thread to give the start signal. */
	wait_for_completion(&suspend_threads_started);

	/* Set maximum priority to preempt all other threads on this CPU. */
	if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);

	dev = this_cpu_read(cpuidle_devices);
	drv = cpuidle_get_cpu_driver(dev);

	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
		cpu, drv->state_count - 1);

	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
		int index;
		/*
		 * Test all possible states, except 0 (which is usually WFI and
		 * doesn't use PSCI).
		 */
		for (index = 1; index < drv->state_count; ++index) {
			struct cpuidle_state *state = &drv->states[index];
			bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
			int ret;

			/*
			 * Set the timer to wake this CPU up once the target
			 * residency of the state has elapsed, which should
			 * leave ample time to enter suspend. If the local
			 * tick is disabled when entering suspend,
			 * suspend_cpu() takes care of switching to a
			 * broadcast timer, so the timer will still wake us
			 * up.
			 */
			mod_timer(&wakeup_timer, jiffies +
				  usecs_to_jiffies(state->target_residency));

			/* IRQs must be disabled during suspend operations. */
			local_irq_disable();

			ret = suspend_cpu(index, broadcast);

			/*
			 * We have woken up. Re-enable IRQs to handle any
			 * pending interrupt, do not wait until the end of the
			 * loop.
			 */
			local_irq_enable();

			if (ret == index) {
				++nb_suspend;
			} else if (ret >= 0) {
				/* We did not enter the wanted state. */
				++nb_shallow_sleep;
			} else {
				pr_err("Failed to suspend CPU %d: error %d "
				       "(requested state %d, cycle %d)\n",
				       cpu, ret, index, i);
				++nb_err;
			}
		}
	}

	/*
	 * Disable the timer to make sure that the timer will not trigger
	 * later.
	 */
	del_timer(&wakeup_timer);
	destroy_timer_on_stack(&wakeup_timer);

	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
		complete(&suspend_threads_done);

	/* Give up on RT scheduling and wait for termination. */
	sched_priority.sched_priority = 0;
	if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);
	for (;;) {
		/* Needs to be set first to avoid missing a wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();
	}

	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
		cpu, nb_suspend, nb_shallow_sleep, nb_err);

	return nb_err;
}

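/*
 * Create one suspend test thread per CPU that has a cpuidle device and
 * driver, start them all, and sum up their error counts. Returns -ENOMEM
 * if the thread array cannot be allocated, or -ENODEV if no CPU could be
 * tested.
 */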
static int suspend_tests(void)
{
	int i, cpu, err = 0;
	struct task_struct **threads;
	int nb_threads = 0;

	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
				GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/*
	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
	 * mode, as it might interfere with the suspend threads on other CPUs.
	 * This does not prevent the suspend threads from using cpuidle (only
	 * the idle tasks check this status). Take the idle lock so that
	 * the cpuidle driver and device look-up can be carried out safely.
	 */
	cpuidle_pause_and_lock();

	for_each_online_cpu(cpu) {
		struct task_struct *thread;
		/* Check that cpuidle is available on that CPU. */
		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

		if (!dev || !drv) {
			pr_warn("cpuidle not available on CPU %d, ignoring\n",
				cpu);
			continue;
		}

		thread = kthread_create_on_cpu(suspend_test_thread,
					       (void *)(long)cpu, cpu,
					       "psci_suspend_test");
		if (IS_ERR(thread))
			pr_err("Failed to create kthread on CPU %d\n", cpu);
		else
			threads[nb_threads++] = thread;
	}

	if (nb_threads < 1) {
		err = -ENODEV;
		goto out;
	}

	atomic_set(&nb_active_threads, nb_threads);

	/*
	 * Wake up the suspend threads. To avoid the main thread being
	 * preempted before all the threads have been unparked, the suspend
	 * threads will wait for the completion of suspend_threads_started.
	 */
	for (i = 0; i < nb_threads; ++i)
		wake_up_process(threads[i]);
	complete_all(&suspend_threads_started);

	wait_for_completion(&suspend_threads_done);

	/* Stop and destroy all threads, get return status. */
	for (i = 0; i < nb_threads; ++i)
		err += kthread_stop(threads[i]);
 out:
	cpuidle_resume_and_unlock();
	kfree(threads);
	return err;
}

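/*
 * Entry point. Running as a late initcall means all secondary CPUs have
 * been brought up, while userspace does not exist yet, so nothing else
 * should be exercising CPU hotplug or cpuidle concurrently.
 */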
static int __init psci_checker(void)
{
	int ret;

	/*
	 * Since we're in an initcall, we assume that all the CPUs that can
	 * be onlined have been onlined.
	 *
	 * The tests assume that hotplug is enabled but nobody else is using
	 * it, otherwise the results will be unpredictable. However, since
	 * there is no userspace yet in initcalls, that should be fine, as
	 * long as no torture test is running at the same time (see Kconfig).
	 */
	nb_available_cpus = num_online_cpus();

	/* Check PSCI operations are set up and working. */
	ret = psci_ops_check();
	if (ret)
		return ret;

	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

	pr_info("Starting hotplug tests\n");
	ret = hotplug_tests();
	if (ret == 0)
		pr_info("Hotplug tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in hotplug tests\n", ret);
	else {
		pr_err("Out of memory\n");
		return ret;
	}

	pr_info("Starting suspend tests (%d cycles per state)\n",
		NUM_SUSPEND_CYCLE);
	ret = suspend_tests();
	if (ret == 0)
		pr_info("Suspend tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in suspend tests\n", ret);
	else {
		switch (ret) {
		case -ENOMEM:
			pr_err("Out of memory\n");
			break;
		case -ENODEV:
			pr_warn("Could not start suspend tests on any CPU\n");
			break;
		}
	}

	pr_info("PSCI checker completed\n");
	return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);