/*
 * ARC architecture SMP support: secondary CPU bring-up and Inter
 * Processor Interrupt (IPI) send/receive handling.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

/* Locks serializing atomics/bitops on cores without LLSC (LLOCK/SCOND) */
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
#endif

/* Platform specific SMP callbacks: platforms override this weak instance */
struct plat_smp_ops __weak plat_smp_ops;

/* Idle task of the secondary currently being brought up, set by __cpu_up() */
struct task_struct *secondary_idle_tsk;

/* Nothing SMP specific to set up for the boot CPU */
void __init smp_prepare_boot_cpu(void)
{
}
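/*
 * Called early in boot from setup_arch():
 *  - mark every CPU up to NR_CPUS as possible (it may become present later)
 *  - give the platform a hook for early, one-time SMP init (e.g. IPI setup)
 */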
void __init smp_init_cpus(void)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}
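/* Called from kernel_init(), i.e. process 1, before secondaries are started */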
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}
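/*
 * Default wake-up helper for "run on reset" platforms, where all cores
 * start executing together: non-masters spin on @wake_flag below, and the
 * master "ungates" one of them by writing its cpu-id into the flag
 */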
static volatile int wake_flag;

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);
	wake_flag = cpu;
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* Busy-wait until the master writes our cpu-id into @wake_flag */
	while (wake_flag != cpu)
		;

	wake_flag = 0;
	__asm__ __volatile__("j @first_lines_of_secondary	\n");
}

/* Platform provided string for /proc/cpuinfo, describing its SMP setup */
const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}
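/*
 * The very first "C" code a secondary executes, entered from the low level
 * asm boot stub @first_lines_of_secondary
 */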
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

	/* Borrow init_mm as the active address space for this CPU */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	/* Per-cpu SMP init: platform hook first, then board/machine hook */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	arc_local_timer_setup();

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
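/*
 * Called from generic smp_init() for each secondary to be brought up.
 * The secondary is halted (or spinning in the default wait loop above),
 * so kick it with the address to run from, then poll for up to 1 second
 * until it marks itself online
 */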
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p\n", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* Wait up to 1 sec for the secondary to come online */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/* Changing the profiling timer rate is not supported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
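/* Per-cpu word collecting pending IPIs as single-bit messages */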
static DEFINE_PER_CPU(unsigned long, ipi_data);

static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);
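	/*
	 * Atomically set the msg bit (other senders may race with us) and
	 * read back the previously pending bits
	 */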
	do {
		new = old = ACCESS_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);
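	/*
	 * Kick the target via the platform hook only if nothing was pending
	 * before: the receiver consumes the whole bitmap per IPI taken, so a
	 * second h/w interrupt for this msg would be redundant
	 */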
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

/* Stop all other CPUs (e.g. on shutdown): everyone but self gets IPI_CPU_STOP */
void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}
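/* Receiver side of IPI_CPU_STOP: halt this CPU */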
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}
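/*
 * Arch-common ISR for inter-processor interrupts: ack the h/w IPI via the
 * platform hook, then drain and dispatch every pending msg bit
 */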
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);
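	/*
	 * "Consume" the msg bitmap: grab pending bits and zero the word in
	 * one atomic op, so a racing sender sees old == 0 and re-kicks us
	 */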
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	/* while (not do/while) also guards against a spurious IPI with no msg */
	while (pending) {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1UL << msg);
	}

	return IRQ_HANDLED;
}
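/*
 * Hook-up API for platforms: ties their per-cpu IPI IRQ to the arch-common
 * do_IPI() handler (ipi_dev merely provides a unique per-cpu dev_id cookie)
 */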
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, int irq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);

	arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev);

	return 0;
}