// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_TIMER,
	IPI_MAX
};

unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};

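/*
 * Record the hartid of the boot CPU as logical CPU 0, so the
 * cpuid <-> hartid mapping is valid before the other harts come up.
 */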
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/* A collection of single bit ipi messages. */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

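/*
 * Translate a hardware thread id (hartid) back to its logical CPU id
 * by scanning the mapping table; returns -ENOENT if no CPU matches.
 */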
int riscv_hartid_to_cpuid(int hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	return -ENOENT;
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

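/*
 * Park the calling CPU: mark it offline and spin in wait_for_interrupt()
 * so it consumes as little power as possible. There is no way back.
 */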
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

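/*
 * Platform IPI operations, registered at boot by whichever IPI driver
 * is in use (typically an SBI- or CLINT-based one). A driver would do
 * something along these lines (sketch, names hypothetical):
 *
 *	static const struct riscv_ipi_ops my_ipi_ops = {
 *		.ipi_inject	= my_ipi_inject,
 *		.ipi_clear	= my_ipi_clear,
 *	};
 *	riscv_set_ipi_ops(&my_ipi_ops);
 *
 * Until registration happens, both send paths below must tolerate a
 * NULL ipi_ops pointer.
 */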
static const struct riscv_ipi_ops *ipi_ops __ro_after_init;

void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
	ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);

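/*
 * Acknowledge a pending IPI: let the driver clear its own state first,
 * then clear the supervisor software interrupt pending bit in the CSR.
 */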
void riscv_clear_ipi(void)
{
	if (ipi_ops && ipi_ops->ipi_clear)
		ipi_ops->ipi_clear();

	csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);

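/*
 * Publish the message bits before triggering the interrupt. The
 * barriers pair with the mb()/xchg() sequence in handle_IPI(), so the
 * receiving CPU is guaranteed to observe the bits set by the sender.
 */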
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	smp_mb__before_atomic();
	for_each_cpu(cpu, mask)
		set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(mask);
	else
		pr_warn("SMP: IPI inject method not available\n");
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	smp_mb__before_atomic();
	set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(cpumask_of(cpu));
	else
		pr_warn("SMP: IPI inject method not available\n");
}

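/* Raise a self-IPI so pending irq_work runs in interrupt context. */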
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif

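/*
 * IPI entry point. Atomically consume all pending message bits with
 * xchg() and dispatch each one; loop until no new bits arrive, since
 * another CPU may send a fresh IPI while earlier ones are processed.
 */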
void handle_IPI(struct pt_regs *regs)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	riscv_clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (ops & (1 << IPI_TIMER)) {
			stats[IPI_TIMER]++;
			tick_receive_broadcast();
		}
#endif
		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
};

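/* Dump the per-CPU IPI counts, one row per message type (/proc/interrupts). */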
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

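/* Hooks used by the generic smp_call_function_*() machinery. */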
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}

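/* Relay a timer tick to CPUs whose local clockevent device is stopped. */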
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_TIMER);
}
#endif

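/*
 * Stop all other CPUs (e.g. on reboot or panic): send IPI_CPU_STOP to
 * every other online CPU, then busy-wait for them to mark themselves
 * offline.
 */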
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

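/* Poke @cpu with IPI_RESCHEDULE; it lands in scheduler_ipi() on the target. */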
void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);