/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and
 * reuse them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	/* Re-initialize the recycled idle task's state for this bringup. */
	init_idle(tsk, cpu);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif /* CONFIG_GENERIC_SMP_IDLE_THREAD */
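
/*
 * Illustrative sketch, not part of this file: how the generic hotplug
 * bringup path is expected to consume idle_thread_get(). The
 * example_bringup_cpu() name is made up; the real caller lives in
 * kernel/cpu.c and hands the idle task to the architecture's __cpu_up().
 */
#if 0
static int example_bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);

	/* No cached idle task means fork_idle() failed at boot. */
	if (IS_ERR(idle))
		return PTR_ERR(idle);

	/* Start the CPU and let it run on its (re)initialized idle task. */
	return __cpu_up(cpu, idle);
}
#endif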

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};

enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	/*
	 * Park the thread so that it could start right on the CPU
	 * when it is available.
	 */
	kthread_park(tsk);
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (!ht->selfparking)
		kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		if (cpumask_test_cpu(cpu, cur->cpumask))
			smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We need to destroy also the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 * @cpumask:		The cpumask where threads run
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
					   const struct cpumask *cpumask)
{
	unsigned int cpu;
	int ret = 0;

	if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(plug_thread->cpumask, cpumask);

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			free_cpumask_var(plug_thread->cpumask);
			goto out;
		}
		if (cpumask_test_cpu(cpu, cpumask))
			smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
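
/*
 * Illustrative sketch, not part of this file: a minimal client of this
 * interface. The example_* names are made up; real users such as
 * ksoftirqd follow the same pattern. smpboot_register_percpu_thread()
 * is the <linux/smpboot.h> wrapper that passes cpu_possible_mask.
 */
#if 0
static DEFINE_PER_CPU(struct task_struct *, example_task);

static int example_should_run(unsigned int cpu)
{
	return 0;	/* Nonzero when there is pending work for this CPU. */
}

static void example_fn(unsigned int cpu)
{
	/* Process the per-CPU work; called from the smpboot_thread_fn()
	 * loop above, with preemption enabled. */
}

static struct smp_hotplug_thread example_threads = {
	.store			= &example_task,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_fn,
	.thread_comm		= "example/%u",
};

static int __init example_init(void)
{
	return smpboot_register_percpu_thread(&example_threads);
}
early_initcall(example_init);
#endif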

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	free_cpumask_var(plug_thread->cpumask);
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

/**
 * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
 * @plug_thread:	Hotplug thread descriptor
 * @new:		Revised cpumask to use
 *
 * The cpumask field in the smp_hotplug_thread must not be updated directly
 * by the client, but only by calling this function.
 * This function can only be called on a registered smp_hotplug_thread.
 */
void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
					  const struct cpumask *new)
{
	struct cpumask *old = plug_thread->cpumask;
	static struct cpumask tmp;
	unsigned int cpu;

	lockdep_assert_cpus_held();
	mutex_lock(&smpboot_threads_lock);

	/* Park threads that were exclusively enabled on the old mask. */
	cpumask_andnot(&tmp, old, new);
	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
		smpboot_park_thread(plug_thread, cpu);

	/* Unpark threads that are exclusively enabled on the new mask. */
	cpumask_andnot(&tmp, new, old);
	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
		smpboot_unpark_thread(plug_thread, cpu);

	cpumask_copy(old, new);

	mutex_unlock(&smpboot_threads_lock);
}
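
/*
 * Illustrative sketch, not part of this file: per the
 * lockdep_assert_cpus_held() above, callers must hold the CPU hotplug
 * lock across the update. The example_* names are made up; the lockup
 * watchdog uses this function in a similar way.
 */
#if 0
static void example_set_allowed_cpus(const struct cpumask *mask)
{
	cpus_read_lock();
	smpboot_update_cpumask_percpu_thread(&example_threads, mask);
	cpus_read_unlock();
}
#endif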

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

/*
 * Called to poll specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If the CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success. Return -EBUSY if the CPU completed its death after
 * cpu_wait_death() timed out, and return -EAGAIN if it has not died
 * at all.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;
	}

	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

	case CPU_POST_DEAD:

		/* The CPU died properly, so just start it up again. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;

	case CPU_DEAD_FROZEN:

		/*
		 * Timeout during CPU death, so let caller know.  The
		 * outgoing CPU completed its processing, but only after
		 * cpu_wait_death() timed out and reported the error. The
		 * caller is free to proceed, in which case the state
		 * will be reset properly by cpu_set_state_online().
		 * Proceeding despite this timeout is a bad idea if this
		 * CPU ever again offlines and then onlines.
		 */
		return -EBUSY;

	case CPU_BROKEN:

		/*
		 * The most likely reason we got here is that there was
		 * a timeout during CPU death, and the outgoing CPU never
		 * did complete its processing.  This could happen on
		 * a virtualized system if the outgoing VCPU was preempted
		 * for more than five seconds, and the user attempted to
		 * online the offending CPU.
		 */
		return -EAGAIN;

	default:

		/* Should not happen.  Famous last words. */
		return -EIO;
	}
}

/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}
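
/*
 * Illustrative sketch, not part of this file: how an architecture's
 * bringup path might pair cpu_check_up_prepare() with
 * cpu_set_state_online(). The example_* names are made up; x86 follows
 * roughly this shape, but treat the exact call sites as assumptions.
 */
#if 0
int example_arch_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = cpu_check_up_prepare(cpu);

	/* -EBUSY only flags a late death report; the caller may proceed. */
	if (ret && ret != -EBUSY)
		return ret;

	return example_arch_kick_cpu(cpu, tidle);	/* hypothetical helper */
}

/* On the incoming CPU, once it is functional enough to be marked up: */
static void example_arch_smp_callin(void)
{
	cpu_set_state_online(smp_processor_id());
}
#endif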

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
	int jf_left = seconds * HZ;
	int oldstate;
	bool ret = true;
	int sleep_jf = 1;

	might_sleep();

	/* The outgoing CPU will normally get done quite quickly. */
	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
		goto update_state;
	udelay(5);

	/* But if the outgoing CPU dawdles, wait increasingly long times. */
	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
		schedule_timeout_uninterruptible(sleep_jf);
		jf_left -= sleep_jf;
		if (jf_left <= 0)
			break;
		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
	}
update_state:
	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	if (oldstate == CPU_DEAD) {
		/* Outgoing CPU died normally, update state. */
		smp_mb(); /* atomic_read() before update. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
	} else {
		/* Outgoing CPU still hasn't died, set state accordingly. */
		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				   oldstate, CPU_BROKEN) != oldstate)
			goto update_state;
		ret = false;
	}
	return ret;
}

/*
 * Called by the outgoing CPU to report its successful death.  Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" state is used when the surviving CPU
 * timed out.  This approach allows different RCU implementations
 * to emit CPU_DEAD notifications without worrying about
 * the CPU being offline while the notifier is running.
 */
bool cpu_report_death(void)
{
	int oldstate;
	int newstate;
	int cpu = smp_processor_id();

	do {
		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
		else
			newstate = CPU_DEAD_FROZEN;
	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				oldstate, newstate) != oldstate);
	return newstate == CPU_DEAD;
}
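
/*
 * Illustrative sketch, not part of this file: the expected pairing of
 * the two halves above. The surviving CPU waits in its __cpu_die()
 * hook while the outgoing CPU reports in from its play-dead path.
 * The example_* names are made up.
 */
#if 0
/* Surviving CPU, in the architecture's __cpu_die() hook: */
static void example_arch_cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5))
		pr_err("CPU %u didn't die within 5 seconds\n", cpu);
}

/* Outgoing CPU, just before it parks itself for good: */
static void example_arch_play_dead(void)
{
	(void)cpu_report_death();
	/* ... spin or mwait in a low-power loop, never to return ... */
}
#endif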

#endif /* #ifdef CONFIG_HOTPLUG_CPU */