/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
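
/*
 * Illustrative only (a sketch, not part of this file): the generic
 * bringup code consumes the helpers above roughly as follows; the
 * actual callers live in kernel/smp.c and kernel/cpu.c.
 *
 *	smp_init() runs idle_threads_init() once, so every possible CPU
 *	has a pre-forked idle task before any secondary CPU is started.
 *
 *	_cpu_up() then fetches that task for the CPU being brought up:
 *
 *		struct task_struct *idle = idle_thread_get(cpu);
 *
 *		if (IS_ERR(idle))
 *			return PTR_ERR(idle);
 */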
#endif /* CONFIG_GENERIC_SMP_IDLE_THREAD */

#endif /* CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};

enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is stopped via kthread_stop().
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (!ht->selfparking)
		kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		if (cpumask_test_cpu(cpu, cur->cpumask))
			smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We need to destroy also the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related
 *					    to hotplug
 * @plug_thread:	Hotplug thread descriptor
 * @cpumask:		The cpumask where threads run
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
					   const struct cpumask *cpumask)
{
	unsigned int cpu;
	int ret = 0;

	if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(plug_thread->cpumask, cpumask);

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			free_cpumask_var(plug_thread->cpumask);
			goto out;
		}
		if (cpumask_test_cpu(cpu, cpumask))
			smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
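
/*
 * Illustrative example (a sketch modeled on clients such as ksoftirqd;
 * every "example_*" name below is hypothetical):
 *
 *	static DEFINE_PER_CPU(struct task_struct *, example_task);
 *
 *	static int example_should_run(unsigned int cpu)
 *	{
 *		return example_pending(cpu);
 *	}
 *
 *	static void example_fn(unsigned int cpu)
 *	{
 *		example_do_work(cpu);
 *	}
 *
 *	static struct smp_hotplug_thread example_threads = {
 *		.store			= &example_task,
 *		.thread_should_run	= example_should_run,
 *		.thread_fn		= example_fn,
 *		.thread_comm		= "example/%u",
 *	};
 *
 * Registering via the cpu_possible_mask wrapper in <linux/smpboot.h>
 * creates one "example/N" thread per online CPU and unparks it:
 *
 *	BUG_ON(smpboot_register_percpu_thread(&example_threads));
 */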

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	free_cpumask_var(plug_thread->cpumask);
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

/**
 * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
 * @plug_thread:	Hotplug thread descriptor
 * @new:		Revised cpumask to use
 *
 * The cpumask field in the smp_hotplug_thread must not be updated directly
 * by the client, but only by calling this function.
 * This function can only be called on a registered smp_hotplug_thread.
 */
int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
					 const struct cpumask *new)
{
	struct cpumask *old = plug_thread->cpumask;
	cpumask_var_t tmp;
	unsigned int cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);

	/* Park threads that were exclusively enabled on the old mask. */
	cpumask_andnot(tmp, old, new);
	for_each_cpu_and(cpu, tmp, cpu_online_mask)
		smpboot_park_thread(plug_thread, cpu);

	/* Unpark threads that are exclusively enabled on the new mask. */
	cpumask_andnot(tmp, new, old);
	for_each_cpu_and(cpu, tmp, cpu_online_mask)
		smpboot_unpark_thread(plug_thread, cpu);

	cpumask_copy(old, new);

	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();

	free_cpumask_var(tmp);

	return 0;
}
EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
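
/*
 * Illustrative example (a sketch; "example_threads" and "new_mask" are
 * hypothetical): a client such as the lockup watchdog narrows its
 * threads to a sysctl-controlled set of CPUs with:
 *
 *	smpboot_update_cpumask_percpu_thread(&example_threads, new_mask);
 *
 * Online CPUs dropped from the mask have their thread parked; online
 * CPUs added to it are unparked. Offline CPUs are handled when they
 * come online, since smpboot_unpark_threads() consults the mask.
 */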

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

/*
 * Called to poll specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success.  Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out.  And yet otherwise again, return -EAGAIN
 * if cpu_wait_death() timed out and the CPU still hasn't gotten around
 * to dying.  In the latter two cases, the CPU might not be set up
 * properly, but it is up to the arch-specific code to decide.
 * Finally, -EIO indicates an unanticipated problem.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;
	}

	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

	case CPU_POST_DEAD:

		/* The CPU died properly, so just start it up again. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;

	case CPU_DEAD_FROZEN:

		/*
		 * Timeout during CPU death, so let caller know.
		 * The outgoing CPU completed its processing, but after
		 * cpu_wait_death() timed out and reported the error. The
		 * caller is free to proceed, in which case the state
		 * will be reset properly by cpu_set_state_online().
		 * Proceeding despite this timeout is a judgment call.
		 * If the outgoing CPU takes this opportunity to proceed
		 * as well, then the calling CPU might end up waiting
		 * forever in cpu_wait_death().
		 */
		return -EBUSY;

	case CPU_BROKEN:

		/*
		 * The most likely reason we got here is that there was
		 * a timeout during CPU death, and the outgoing CPU never
		 * did complete its processing.  This could happen on
		 * a virtualized system if the outgoing VCPU was preempted
		 * long enough for the surviving CPU to time out in
		 * cpu_wait_death().  The outgoing CPU might still come
		 * around, so tell the caller to try again later by
		 * returning -EAGAIN.
		 */
		return -EAGAIN;

	default:

		/* Should not happen.  Famous last words. */
		return -EIO;
	}
}

/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
	int jf_left = seconds * HZ;
	int oldstate;
	bool ret = true;
	int sleep_jf = 1;

	might_sleep();

	/* The outgoing CPU will normally get done quite quickly. */
	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
		goto update_state;
	udelay(5);

	/* But if the outgoing CPU dawdles, wait increasingly long times. */
	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
		schedule_timeout_uninterruptible(sleep_jf);
		jf_left -= sleep_jf;
		if (jf_left <= 0)
			break;
		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
	}
update_state:
	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	if (oldstate == CPU_DEAD) {
		/* Outgoing CPU died normally, update state. */
		smp_mb(); /* atomic_read() before update. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
	} else {
		/* Outgoing CPU still hasn't died, set state accordingly. */
		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				   oldstate, CPU_BROKEN) != oldstate)
			goto update_state;
		ret = false;
	}
	return ret;
}

/*
 * Called by the outgoing CPU to report its successful death.  Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate CPU_DEAD_FROZEN state is used when the surviving CPU
 * timed out: it records that the CPU died only after the timeout was
 * reported, so that a later cpu_check_up_prepare() can refuse to
 * blindly reuse the CPU.  This approach allows architectures to omit
 * calls to cpu_check_up_prepare() and cpu_set_state_online() without
 * defeating this deadlock detection.
 */
bool cpu_report_death(void)
{
	int oldstate;
	int newstate;
	int cpu = smp_processor_id();

	do {
		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
		else
			newstate = CPU_DEAD_FROZEN;
	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				oldstate, newstate) != oldstate);
	return newstate == CPU_DEAD;
}
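
/*
 * A sketch of the death handshake (illustrative; the exact hooks are
 * arch-specific):
 *
 * The surviving CPU waits in its __cpu_die() implementation:
 *
 *	if (!cpu_wait_death(cpu, 5))
 *		pr_err("CPU %u refused to die\n", cpu);
 *
 * The outgoing CPU reports in on its way to the dead state, then
 * enters its arch-specific low-power dead loop:
 *
 *	(void)cpu_report_death();
 */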

#endif /* CONFIG_HOTPLUG_CPU */