#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

#ifndef CONFIG_ARC_HAS_LLSC
/*
 * Cores lacking LLOCK/SCOND implement atomics and bitops with these arch
 * spinlocks; exported since the inlined atomic ops in modules use them too.
 */
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;

EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
EXPORT_SYMBOL_GPL(smp_bitops_lock);
#endif

/* Weak instance: platform code may provide its own strong definition */
struct plat_smp_ops __weak plat_smp_ops;

/* Stashed by __cpu_up() for the incoming secondary's early boot code */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel(): nothing to set up for the boot cpu on ARC */
void __init smp_prepare_boot_cpu(void)
{
}

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return -EINVAL;

	if (cpulist_parse(buf, cpumask))
		return -EINVAL;

	return 0;
}
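
/*
 * Illustrative (hypothetical) device tree fragment that the lookup above
 * would parse; the property sits in the DT root and its value is a cpulist
 * in the format accepted by cpulist_parse():
 *
 *	/ {
 *		possible-cpus = "0-1,3";
 *	};
 */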

/*
 * Build cpu_possible_mask from the "possible-cpus" DT property, falling
 * back to all NR_CPUS if the property is absent or malformed.
 */
static void __init arc_init_cpu_possible(void)
{
	struct cpumask cpumask;

	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
			NR_CPUS);

		cpumask_setall(&cpumask);
	}

	if (!cpumask_test_cpu(0, &cpumask))
		panic("Master cpu (cpu[0]) is missing from the cpu possible mask!");

	init_cpu_possible(&cpumask);
}

/*
 * Called from setup_arch() before setup_processor():
 * - initialize the cpu possible map early (it describes which CPUs may
 *   ever be present in the system)
 * - run the early SMP init hook, for multi-core IP common to several
 *   platforms (hence not part of any platform specific init)
 */
void __init smp_init_cpus(void)
{
	arc_init_cpu_possible();

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}

/* Called from the init process (pid 1) via smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * If the platform didn't set the present map already, do it now.
	 * The boot cpu is already marked present by init/main.c.
	 */
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{

}

/*
 * Default wake-up handshake for secondaries coming out of reset: the
 * master writes the target cpu id into wake_flag and the secondary spins
 * on it (see arc_platform_smp_wait_to_boot() below).
 */
static volatile int wake_flag;

#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif
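
/*
 * Note on the accessors above: ARCv2 goes through the uncached helpers,
 * presumably so the master's write to wake_flag is visible to a secondary
 * that polls it before bringing up its caches, while ARCompact uses plain
 * accesses (rationale inferred from the code, not documented here).
 */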

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* halt-on-reset cores are released by un-halting, not via wake_flag */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}
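
/*
 * Sketch of the default bring-up handshake implemented by the two routines
 * above (assuming no platform cpu_kick hook and run-on-reset secondaries):
 *
 *	master (__cpu_up)		secondary (early boot code)
 *	-----------------		---------------------------
 *	__boot_write(wake_flag, cpu)	while (__boot_read(wake_flag) != cpu)
 *						;
 *					__boot_write(wake_flag, 0)
 *					start_kernel_secondary()
 */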

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The first C code run by a secondary, called from the early boot stub in
 * head.S with only a bare minimum environment (stack, "current") set up.
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, caches, vector table, interrupts etc */
	setup_processor();

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* SMP hardware setup specific to this cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init(), once for each present cpu.
 *
 * At this point the secondary is either spinning on wake_flag (default
 * kick) or halted (halt-on-reset), so wake it and then poll for up to a
 * second for it to mark itself online.
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p\n", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				      (unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for up to 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*
 * The profiling timer rate cannot be changed on this arch
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
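
/*
 * Arches with one IRQ per msg type can infer the message from the IRQ
 * number. ARC has a single dedicated IPI IRQ, so the pending msg type(s)
 * are instead conveyed as bits in the per-cpu word below.
 */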
static DEFINE_PER_CPU(unsigned long, ipi_data);

static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically set the msg bit (others may be posting concurrently)
	 * and fetch back the prior value.
	 */
	do {
		new = old = READ_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Kick the hardware IPI only if no msg was already pending (!old):
	 * if one was, the receiver hasn't dequeued ipi_data yet, so it will
	 * see and handle this msg with the IPI already in flight.
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}
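
/*
 * Worked example of the elided kick above: cpu A posts IPI_RESCHEDULE
 * (ipi_data: 0 -> 0x2, old == 0) and raises the hardware IPI; before the
 * target dequeues it, cpu B posts IPI_CALL_FUNC (0x2 -> 0x6, old != 0) and
 * skips the kick. The target's do_IPI() xchg()es ipi_data back to 0 and
 * handles both messages off the single interrupt.
 */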

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * Arch-common ISR for the dedicated IPI IRQ, with hooks for platform
 * specific acknowledge/clear.
 */
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "Dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msgs from elided IPIs: see ipi_send_msg_one() above).
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

/*
 * API called by platform code to hook up the arch-common IPI ISR to its
 * IPI IRQ.
 *
 * Note: if the IPI is provided by the platform (vs. say ARC MCIP), its
 * intc setup/map function needs to call irq_set_percpu_devid() for the
 * IPI IRQ, otherwise request_percpu_irq() below will fail.
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all cpus call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}
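
/*
 * Example caller (the ARC MCIP intc code): its per-cpu init hook runs
 * smp_ipi_irq_setup(cpu, IPI_IRQ), so cpu0 requests the percpu IRQ once
 * and every cpu then enables its own copy of it.
 */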