/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk)
                return ERR_PTR(-ENOMEM);
        init_idle(tsk, cpu);
        return tsk;
}
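
/*
 * A usage sketch (an assumption, not code from this file): the generic
 * hotplug core is expected to fetch the idle task before waking the new
 * CPU, along the lines of:
 *
 *      struct task_struct *idle = idle_thread_get(cpu);
 *
 *      if (IS_ERR(idle))
 *              return PTR_ERR(idle);
 *      ret = __cpu_up(cpu, idle);
 */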

void __init idle_thread_set_boot_cpu(void)
{
        per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk) {
                tsk = fork_idle(cpu);
                if (IS_ERR(tsk))
                        pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
                else
                        per_cpu(idle_threads, cpu) = tsk;
        }
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
        unsigned int cpu, boot_cpu;

        boot_cpu = smp_processor_id();

        for_each_possible_cpu(cpu) {
                if (cpu != boot_cpu)
                        idle_init(cpu);
        }
}
#endif /* CONFIG_GENERIC_SMP_IDLE_THREAD */

#endif /* CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
        unsigned int cpu;
        unsigned int status;
        struct smp_hotplug_thread *ht;
};

enum {
        HP_THREAD_NONE = 0,
        HP_THREAD_ACTIVE,
        HP_THREAD_PARKED,
};
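
/*
 * State machine note (a descriptive addition, not from the original
 * file): a hotplug thread starts in HP_THREAD_NONE, becomes
 * HP_THREAD_ACTIVE once the optional setup() callback has run on its
 * CPU, and thereafter bounces between HP_THREAD_PARKED and
 * HP_THREAD_ACTIVE as the CPU goes offline and online, with the
 * park()/unpark() callbacks invoked on the transitions.
 */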

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is stopped via kthread_stop().
 */
static int smpboot_thread_fn(void *data)
{
        struct smpboot_thread_data *td = data;
        struct smp_hotplug_thread *ht = td->ht;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                preempt_disable();
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        /* cleanup is told whether the CPU is still online */
                        if (ht->cleanup)
                                ht->cleanup(td->cpu, cpu_online(td->cpu));
                        kfree(td);
                        return 0;
                }

                if (kthread_should_park()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->park && td->status == HP_THREAD_ACTIVE) {
                                BUG_ON(td->cpu != smp_processor_id());
                                ht->park(td->cpu);
                                td->status = HP_THREAD_PARKED;
                        }
                        kthread_parkme();
                        /* We might have been woken for stop */
                        continue;
                }

                BUG_ON(td->cpu != smp_processor_id());

                /* Check for state change setup */
                switch (td->status) {
                case HP_THREAD_NONE:
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->setup)
                                ht->setup(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        continue;

                case HP_THREAD_PARKED:
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->unpark)
                                ht->unpark(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        continue;
                }

                if (!ht->thread_should_run(td->cpu)) {
                        preempt_enable_no_resched();
                        schedule();
                } else {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        ht->thread_fn(td->cpu);
                }
        }
}

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
        struct smpboot_thread_data *td;

        if (tsk)
                return 0;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
        if (!td)
                return -ENOMEM;
        td->cpu = cpu;
        td->ht = ht;

        tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
                                    ht->thread_comm);
        if (IS_ERR(tsk)) {
                kfree(td);
                return PTR_ERR(tsk);
        }
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
        if (ht->create) {
                /*
                 * Make sure that the task has actually scheduled out
                 * into park position, before calling the create
                 * callback. At least the migration thread callback
                 * requires that the task is off the runqueue.
                 */
                if (!wait_task_inactive(tsk, TASK_PARKED))
                        WARN_ON(1);
                else
                        ht->create(cpu);
        }
        return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;
        int ret = 0;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list) {
                ret = __smpboot_create_thread(cur, cpu);
                if (ret)
                        break;
        }
        mutex_unlock(&smpboot_threads_lock);
        return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (ht->pre_unpark)
                ht->pre_unpark(cpu);
        kthread_unpark(tsk);
}

void smpboot_unpark_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list)
                smpboot_unpark_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (tsk && !ht->selfparking)
                kthread_park(tsk);
}

void smpboot_park_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        /* Park threads in reverse order of their registration */
        list_for_each_entry_reverse(cur, &hotplug_threads, list)
                smpboot_park_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
}
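
/*
 * Ordering note (a descriptive addition, not from the original file):
 * the hotplug core is expected to call smpboot_create_threads() while
 * preparing a CPU, smpboot_unpark_threads() once it is online, and
 * smpboot_park_threads() when it goes down, so thread_fn() only ever
 * runs on an online CPU.  Parking walks the list in reverse, mirroring
 * the unpark order.
 */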

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
        unsigned int cpu;

        /* We need to destroy also the parked threads of offline cpus */
        for_each_possible_cpu(cpu) {
                struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

                if (tsk) {
                        kthread_stop(tsk);
                        put_task_struct(tsk);
                        *per_cpu_ptr(ht->store, cpu) = NULL;
                }
        }
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        unsigned int cpu;
        int ret = 0;

        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
                if (ret) {
                        smpboot_destroy_threads(plug_thread);
                        goto out;
                }
                smpboot_unpark_thread(plug_thread, cpu);
        }
        list_add(&plug_thread->list, &hotplug_threads);
out:
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
        return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
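
/*
 * A minimal registration sketch with hypothetical names (example_*),
 * not code from this file.  A client supplies per-CPU task storage and
 * its callbacks in a struct smp_hotplug_thread and registers it once:
 *
 *      static DEFINE_PER_CPU(struct task_struct *, example_tsk);
 *
 *      static struct smp_hotplug_thread example_threads = {
 *              .store                  = &example_tsk,
 *              .thread_should_run      = example_should_run,
 *              .thread_fn              = example_fn,
 *              .thread_comm            = "example/%u",
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return smpboot_register_percpu_thread(&example_threads);
 *      }
 *
 * The "%u" in thread_comm is filled in with the CPU number by
 * kthread_create_on_cpu().
 */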

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        list_del(&plug_thread->list);
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

/*
 * Called to poll specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
        return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success.  Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out.  And yet otherwise again, return -EAGAIN
 * if cpu_wait_death() timed out and the CPU still hasn't gotten its
 * act together.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
                return 0;
        }

        switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

        case CPU_POST_DEAD:

                /* The CPU died properly, so just start it up again. */
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
                return 0;

        case CPU_DEAD_FROZEN:

                /*
                 * Timeout during CPU death, so let caller know.  The
                 * outgoing CPU completed its processing, but only after
                 * cpu_wait_death() timed out and reported the error.  The
                 * caller is free to proceed, in which case the state will
                 * be reset properly by cpu_set_state_online().
                 */
                return -EBUSY;

        case CPU_BROKEN:

                /*
                 * The most likely reason we got here is that there was
                 * a timeout during CPU death, and the outgoing CPU never
                 * did complete its processing.  This could happen on
                 * a virtualized system if the outgoing VCPU gets preempted
                 * for more than five seconds, and the user attempts to
                 * immediately online that same CPU.  Trying again later
                 * might return -EBUSY above, hence -EAGAIN.
                 */
                return -EAGAIN;

        default:

                /* Should not happen.  Famous last words. */
                return -EIO;
        }
}

/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
        (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
        int jf_left = seconds * HZ;
        int oldstate;
        bool ret = true;
        int sleep_jf = 1;

        might_sleep();

        /* The outgoing CPU will normally get done quite quickly. */
        if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
                goto update_state;
        udelay(5);

        /* But if the outgoing CPU dawdles, wait increasingly long times. */
        while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
                schedule_timeout_uninterruptible(sleep_jf);
                jf_left -= sleep_jf;
                if (jf_left <= 0)
                        break;
                sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
        }
update_state:
        oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
        if (oldstate == CPU_DEAD) {
                /* Outgoing CPU died normally, update state. */
                smp_mb(); /* atomic_read() before update. */
                atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
        } else {
                /* Outgoing CPU still hasn't died, set state accordingly. */
                if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
                                   oldstate, CPU_BROKEN) != oldstate)
                        goto update_state;
                ret = false;
        }
        return ret;
}
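
/*
 * Usage sketch (an assumption about the arch side, not code from this
 * file): the surviving CPU typically waits from its __cpu_die()
 * implementation, e.g.:
 *
 *      if (!cpu_wait_death(cpu, 5))
 *              pr_err("CPU %u did not die in time\n", cpu);
 */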

/*
 * Called by the outgoing CPU to report its successful death.  Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate CPU_DEAD_FROZEN state is used when the surviving CPU has
 * already timed out and marked the outgoing CPU as CPU_BROKEN.  This
 * lets architectures omit calls to cpu_check_up_prepare() and
 * cpu_set_state_online() without defeating this report mechanism.
 */
bool cpu_report_death(void)
{
        int oldstate;
        int newstate;
        int cpu = smp_processor_id();

        do {
                oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
                if (oldstate != CPU_BROKEN)
                        newstate = CPU_DEAD;
                else
                        newstate = CPU_DEAD_FROZEN;
        } while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
                                oldstate, newstate) != oldstate);
        return newstate == CPU_DEAD;
}
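
/*
 * Usage sketch (an assumption about the arch side, not code from this
 * file): the outgoing CPU reports in late in its arch-specific
 * cpu_die() path, just before the final low-level halt:
 *
 *      (void)cpu_report_death();
 *      arch_halt_this_cpu();   (hypothetical arch-specific halt)
 */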

#endif /* CONFIG_HOTPLUG_CPU */