#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

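/* Cores lacking LLOCK/SCOND serialize atomic R-M-W ops via this global lock */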
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;

EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
#endif

struct plat_smp_ops __weak plat_smp_ops;
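/* Idle task of the secondary being brought up; only needed in early boot */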
struct task_struct *secondary_idle_tsk;

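/* Called from start_kernel(); nothing to set up for the boot cpu on ARC */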
void __init smp_prepare_boot_cpu(void)
{
}

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return -EINVAL;

	if (cpulist_parse(buf, cpumask))
		return -EINVAL;

	return 0;
}
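/*
 * Read from DeviceTree and set up the cpu possible mask. If there is
 * no "possible-cpus" property, pretend all [0..NR_CPUS-1] CPUs exist.
 */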
static void __init arc_init_cpu_possible(void)
{
	struct cpumask cpumask;

	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
			NR_CPUS);

		cpumask_setall(&cpumask);
	}

	if (!cpumask_test_cpu(0, &cpumask))
		panic("Master cpu (cpu[0]) is missing from cpu possible mask!");

	init_cpu_possible(&cpumask);
}
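/*
 * Called from setup_arch()
 *
 * - Initialise the cpu possible map early: it describes the CPUs which
 *   may be present or become present in the system.
 * - Call the early SMP init hook, for multi-core IP common to several
 *   platforms (hence not part of a platform specific init hook).
 */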
void __init smp_init_cpus(void)
{
	arc_init_cpu_possible();

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}
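/* Called from the init process (pid 1) during boot */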
void __init smp_prepare_cpus(unsigned int max_cpus)
{
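	/*
	 * If the platform didn't set up the present map already, do it
	 * now; the boot cpu is marked present by init/main.c itself.
	 */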
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

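/*
 * Default smp boot helper to start a second CPU
 *
 * The master writes the secondary's cpu id into @wake_flag and the
 * secondary spins until it sees its own id. The __boot_read/write()
 * wrappers access the flag uncached on non-ARCompact ISAs, since the
 * secondary's caches may not be enabled yet.
 */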
static volatile int wake_flag;

#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
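	/* for halt-on-reset, we've waited already */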
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

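/*
 * The very first "C" code executed by a secondary, called from the asm
 * stub in head.S ("current" is already set up by low level boot code)
 */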
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

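	/* MMU, caches, vector table, interrupts etc */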
	setup_processor();

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

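	/* Platform/SoC specific per-cpu SMP hardware setup */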
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
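/*
 * Called from kernel_init() -> smp_init(), once for each secondary CPU
 *
 * At this point the secondary is halted: it either spun in head.S or
 * was configured to halt-on-reset, so it needs waking up. All it needs
 * is an entry point (first_lines_of_secondary) and an idle task
 * (passed via secondary_idle_tsk).
 */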
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p\n", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				      (unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);
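	/* wait up to one second (HZ jiffies) for the secondary to come up */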
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}
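/* Changing the profiling timer rate is not supported */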
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
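/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/
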
enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
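/*
 * In arches with one IRQ per msg type, the receiver can use the IRQ id
 * to figure out what was sent. ARC has a single dedicated IPI IRQ, so
 * the pending msg types are conveyed via this per-cpu bitmask instead.
 */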
static DEFINE_PER_CPU(unsigned long, ipi_data);

static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

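	/*
	 * Atomically set the msg bit (other senders may be updating the
	 * word concurrently) and read back the old value.
	 */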
	do {
		new = old = READ_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

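	/*
	 * Call the platform specific IPI kick function, but avoid it if
	 * possible: only kick if no msg was already pending from another
	 * sender. Otherwise the receiver will see this msg as well when
	 * it takes the IPI for the pending one: !old means the receiver
	 * has not yet dequeued the msg(s), so the new msg can free-load
	 * on the same interrupt.
	 */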
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

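/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */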
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

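/*
 * Arch-common ISR for inter-processor interrupts
 * Has hooks for platform specific IPI ack/clear
 */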
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

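	/*
	 * "Dequeue" all msgs corresponding to this IPI (including any
	 * piggybacked on elided kicks: see ipi_send_msg_one() above)
	 */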
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

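/*
 * API called by platform code to hook up the arch-common ISR to its
 * IPI IRQ
 *
 * Note: if the IPI is provided by the platform (vs. say the ARC MCIP),
 * its intc setup/map function needs to call irq_set_percpu_devid() for
 * the IPI IRQ, otherwise request_percpu_irq() below will fail
 */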
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

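	/* Boot cpu requests the IRQ; every cpu (boot included) enables it */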
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}