/*
 * Architecture specific (PPC64) functions for kexec based crash dumps.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Haren Myneni
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
#include <asm/debug.h>

/*
 * The primary CPU waits a while for all secondary CPUs to enter. This is to
 * avoid sending an IPI if the secondary CPUs are entering
 * crash_kexec_secondary on their own (eg via a system reset).
 *
 * The secondary timeout is to avoid a hang, in case the primary CPU dies in
 * the crash_kexec path without entering crash_kexec_secondary.
 */
#define PRIMARY_TIMEOUT         500     /* ms */
#define SECONDARY_TIMEOUT       1000    /* ms */

#define IPI_TIMEOUT             10000   /* ms */
#define REAL_MODE_TIMEOUT       10000   /* ms */

static int time_to_dump;

/*
 * crash_wake_offline should be set to 1 by platforms that intend to wake
 * up offline CPUs prior to jumping into the kdump kernel, so that an
 * offline CPU cannot wake up later (e.g. due to a hardware error) and run
 * against the old kernel's state.
 */
int crash_wake_offline;

#define CRASH_HANDLER_MAX 3
/* List of registered shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX];
static DEFINE_SPINLOCK(crash_handlers_lock);

static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
static int crash_shutdown_cpu = -1;
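
/*
 * Debugger hook installed while the crash path runs risky code: if the
 * CPU noted in crash_shutdown_cpu takes a trap, longjmp back to the most
 * recent setjmp of crash_shutdown_buf so the kdump path can carry on.
 */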
static int handle_fault(struct pt_regs *regs)
{
        if (crash_shutdown_cpu == smp_processor_id())
                longjmp(crash_shutdown_buf, 1);
        return 0;
}

#ifdef CONFIG_SMP

static atomic_t cpus_in_crash;
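/*
 * Runs on each secondary CPU that receives the crash IPI (or that entered
 * via crash_kexec_secondary after a system reset): save the CPU's register
 * state once, report in, then spin until the crashing CPU starts the dump.
 */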
void crash_ipi_callback(struct pt_regs *regs)
{
        static cpumask_t cpus_state_saved = CPU_MASK_NONE;

        int cpu = smp_processor_id();

        hard_irq_disable();
        if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
                crash_save_cpu(regs, cpu);
                cpumask_set_cpu(cpu, &cpus_state_saved);
        }

        atomic_inc(&cpus_in_crash);
        smp_mb__after_atomic();

        /*
         * Wait here until the crashing CPU has finished preparing the
         * dump (it sets time_to_dump once all CPUs have checked in).
         */
        while (!time_to_dump)
                cpu_relax();

        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
        kexec_smp_wait();
#else
        for (;;);       /* Spin forever; there is no kexec_smp_wait() on 32-bit */
#endif

        /* NOTREACHED */
}

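/*
 * Stop the other CPUs: send them the crash IPI and wait (with a timeout)
 * for all of them to check in. If some do not respond, ask the operator
 * to trigger a system reset and retry once more.
 */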
static void crash_kexec_prepare_cpus(int cpu)
{
        unsigned int msecs;
        unsigned int ncpus = num_online_cpus() - 1;     /* Excluding the panic cpu */
        int tries = 0;
        int (*old_handler)(struct pt_regs *regs);

        printk(KERN_EMERG "Sending IPI to other CPUs\n");

        if (crash_wake_offline)
                ncpus = num_present_cpus() - 1;

        crash_send_ipi(crash_ipi_callback);
        smp_wmb();

again:
        /*
         * FIXME: Until we have a way to stop the other CPUs reliably, the
         * crash CPU sends an IPI and then waits for them to respond.
         */
        msecs = IPI_TIMEOUT;
        while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
                mdelay(1);

        /* Would it be better to replace the trap vector here? */

        if (atomic_read(&cpus_in_crash) >= ncpus) {
                printk(KERN_EMERG "IPI complete\n");
                return;
        }

        printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
                ncpus - atomic_read(&cpus_in_crash));

        /*
         * If we have a panic timeout set then we can't wait indefinitely
         * for someone to activate a system reset. We also give up on the
         * second time through if the system reset failed to work.
         */
        if ((panic_timeout > 0) || (tries > 0))
                return;

        /*
         * A system reset will cause all CPUs to take an 0x100 exception.
         * The primary CPU returns here via setjmp, and the secondary
         * CPUs reexecute the crash_kexec_secondary path.
         */
        old_handler = __debugger;
        __debugger = handle_fault;
        crash_shutdown_cpu = smp_processor_id();

        if (setjmp(crash_shutdown_buf) == 0) {
                printk(KERN_EMERG "Activate system reset (dumprestart) "
                                  "to stop other cpu(s)\n");

                /*
                 * A system reset will force all CPUs to execute the
                 * crash code again. We need to reset cpus_in_crash so we
                 * wait for everyone to do this.
                 */
                atomic_set(&cpus_in_crash, 0);
                smp_mb();

                while (atomic_read(&cpus_in_crash) < ncpus)
                        cpu_relax();
        }

        crash_shutdown_cpu = -1;
        __debugger = old_handler;

        tries++;
        goto again;
}

/*
 * This function will be called by secondary cpus.
 */
void crash_kexec_secondary(struct pt_regs *regs)
{
        unsigned long flags;
        int msecs = SECONDARY_TIMEOUT;

        local_irq_save(flags);

        /* Wait for the primary crash CPU to signal its progress */
        while (crashing_cpu < 0) {
                if (--msecs < 0) {
                        /* No response, the kdump image may not have been loaded */
                        local_irq_restore(flags);
                        return;
                }

                mdelay(1);
        }

        crash_ipi_callback(regs);
}

#else   /* ! CONFIG_SMP */

static void crash_kexec_prepare_cpus(int cpu)
{
        /*
         * Move the secondary CPUs over to us so that we can safely copy
         * the new kernel into the 0 - 0x100 region.
         *
         * do this if kexec in setup.c ?
         */
#ifdef CONFIG_PPC64
        smp_release_cpus();
#else
        /* FIXME */
#endif
}

void crash_kexec_secondary(struct pt_regs *regs)
{
}
#endif  /* CONFIG_SMP */

/* Wait for all the CPUs to hit real mode, but time out if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
        unsigned int msecs;
        int i;

        msecs = REAL_MODE_TIMEOUT;
        for (i = 0; i < nr_cpu_ids && msecs > 0; i++) {
                if (i == cpu)
                        continue;

                while (paca_ptrs[i]->kexec_state < KEXEC_STATE_REAL_MODE) {
                        barrier();
                        if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
                                break;
                        msecs--;
                        mdelay(1);
                }
        }
        mb();
}
#else
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif  /* CONFIG_SMP && CONFIG_PPC64 */

/*
 * Register a function to be called on shutdown.  Only use this if you
 * can't reset your device in the second kernel.
 */
int crash_shutdown_register(crash_shutdown_t handler)
{
        unsigned int i, rc;

        spin_lock(&crash_handlers_lock);
        for (i = 0; i < CRASH_HANDLER_MAX; i++)
                if (!crash_shutdown_handles[i]) {
                        /* Insert handle at first empty entry */
                        crash_shutdown_handles[i] = handler;
                        rc = 0;
                        break;
                }

        if (i == CRASH_HANDLER_MAX) {
                printk(KERN_ERR "Crash shutdown handles full, "
                       "not registered.\n");
                rc = 1;
        }

        spin_unlock(&crash_handlers_lock);
        return rc;
}
EXPORT_SYMBOL(crash_shutdown_register);
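
/*
 * A hypothetical usage sketch (not part of this file): a driver that cannot
 * quiesce its device from the kdump kernel could register a handler matching
 * the crash_shutdown_t type (void (*)(void)):
 *
 *      static void example_crash_shutdown(void)
 *      {
 *              // stop DMA so the capture kernel can boot safely
 *      }
 *
 *      rc = crash_shutdown_register(example_crash_shutdown);
 *      ...
 *      crash_shutdown_unregister(example_crash_shutdown);
 */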

int crash_shutdown_unregister(crash_shutdown_t handler)
{
        unsigned int i, rc;

        spin_lock(&crash_handlers_lock);
        for (i = 0; i < CRASH_HANDLER_MAX; i++)
                if (crash_shutdown_handles[i] == handler)
                        break;

        if (i == CRASH_HANDLER_MAX) {
                printk(KERN_ERR "Crash shutdown handle not found\n");
                rc = 1;
        } else {
                /* Shift the remaining handles down to fill the gap */
                for (; i < (CRASH_HANDLER_MAX - 1); i++)
                        crash_shutdown_handles[i] =
                                crash_shutdown_handles[i+1];

                /*
                 * Reset the last entry to NULL now that it has been shifted
                 * down, so that a new handle can be registered there.
                 */
                crash_shutdown_handles[i] = NULL;
                rc = 0;
        }

        spin_unlock(&crash_handlers_lock);
        return rc;
}
EXPORT_SYMBOL(crash_shutdown_unregister);

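/*
 * Main crash shutdown entry point, run on the CPU that called crash_kexec():
 * stop the other CPUs, save register state, run any registered shutdown
 * handlers and leave the system ready for the jump into the kdump kernel.
 */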
void default_machine_crash_shutdown(struct pt_regs *regs)
{
        unsigned int i;
        int (*old_handler)(struct pt_regs *regs);

        /*
         * This function is only called after the system
         * has panicked or is otherwise in a critical state.
         * The minimum amount of code to allow a kexec'd kernel
         * to run successfully needs to happen here.
         *
         * In practice this means stopping other cpus in
         * an SMP system.
         * The kernel is broken so disable interrupts.
         */
        hard_irq_disable();

        /*
         * Make a note of the crashing cpu. Will be used in machine_kexec
         * such that another IPI will not be sent.
         */
        crashing_cpu = smp_processor_id();

        /*
         * If we came in via a system reset, wait a while for the secondary
         * CPUs to enter on their own.
         */
        if (TRAP(regs) == 0x100)
                mdelay(PRIMARY_TIMEOUT);

        crash_kexec_prepare_cpus(crashing_cpu);

        crash_save_cpu(regs, crashing_cpu);

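        /* Release the secondary CPUs spinning in crash_ipi_callback() */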
        time_to_dump = 1;

        crash_kexec_wait_realmode(crashing_cpu);

        machine_kexec_mask_interrupts();

        /*
         * Call the registered shutdown routines safely.  Swap out
         * __debugger_fault_handler, and replace it on exit.
         */
        old_handler = __debugger_fault_handler;
        __debugger_fault_handler = handle_fault;
        crash_shutdown_cpu = smp_processor_id();
        for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
                if (setjmp(crash_shutdown_buf) == 0) {
                        /*
                         * Insert syncs and delay to ensure
                         * instructions in the dangerous region don't
                         * leak away from this protected region.
                         */
                        asm volatile("sync; isync");
                        /* dangerous region */
                        crash_shutdown_handles[i]();
                        asm volatile("sync; isync");
                }
        }
        crash_shutdown_cpu = -1;
        __debugger_fault_handler = old_handler;

        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(1, 0);
}