/*
 * arch/ia64/kernel/crash.c
 *
 * Architecture specific (ia64) functions for kexec based crash dumps.
 */
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/kexec.h>
#include <linux/elfcore.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/kdebug.h>

#include <asm/mca.h>

int kdump_status[NR_CPUS];
static atomic_t kdump_cpu_frozen;
atomic_t kdump_in_progress;
static int kdump_freeze_monarch;
static int kdump_on_init = 1;
static int kdump_on_fatal_mca = 1;

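/*
 * Append one ELF note (header, name, descriptor) to buf and return a
 * pointer just past it.  Each field is padded to 4-byte alignment as
 * the ELF note format requires.
 */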
static inline Elf64_Word
*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
		size_t data_len)
{
	struct elf_note *note = (struct elf_note *)buf;
	note->n_namesz = strlen(name) + 1;
	note->n_descsz = data_len;
	note->n_type = type;
	buf += (sizeof(*note) + 3)/4;
	memcpy(buf, name, note->n_namesz);
	buf += (note->n_namesz + 3)/4;
	memcpy(buf, data, data_len);
	buf += (data_len + 3)/4;
	return buf;
}

static void
final_note(void *buf)
{
	memset(buf, 0, sizeof(struct elf_note));
}

extern void ia64_dump_cpu_regs(void *);

static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);

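/*
 * Save this cpu's register state as an NT_PRSTATUS note in its
 * per-cpu crash_notes buffer so that it ends up in the crash dump.
 */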
void
crash_save_this_cpu(void)
{
	void *buf;
	unsigned long cfm, sof, sol;

	int cpu = smp_processor_id();
	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);

	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
	memset(prstatus, 0, sizeof(*prstatus));
	prstatus->pr_pid = current->pid;

	ia64_dump_cpu_regs(dst);
	cfm = dst[43];
	sol = (cfm >> 7) & 0x7f;	/* CFM.sol: size of locals */
	sof = cfm & 0x7f;		/* CFM.sof: size of frame */
	/* advance the backing store pointer past the output registers */
	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
			sof - sol);

	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, prstatus,
			sizeof(*prstatus));
	final_note(buf);
}

#ifdef CONFIG_SMP
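/*
 * Wait up to roughly one second for all other online cpus to enter
 * kdump_cpu_freeze().  Returns 0 on success, 1 on timeout.
 */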
static int
kdump_wait_cpu_freeze(void)
{
	int cpu_num = num_online_cpus() - 1;
	int timeout = 1000;

	while (timeout-- > 0) {
		if (atomic_read(&kdump_cpu_frozen) == cpu_num)
			return 0;
		udelay(1000);
	}
	return 1;
}
#endif

void
machine_crash_shutdown(struct pt_regs *pt)
{
	/*
	 * This function is only called after the system has panicked or
	 * is otherwise in a critical state.  The minimum amount of code
	 * needed to let a kexec'd kernel run successfully happens here.
	 *
	 * In practice this means shooting down the other cpus in an SMP
	 * system.
	 */
	kexec_disable_iosapic();
#ifdef CONFIG_SMP
	/*
	 * If kdump_on_init is set and an INIT is asserted here, kdump will
	 * be started again via the INIT monarch.
	 */
	local_irq_disable();
	ia64_set_psr_mc();	/* mask MCA/INIT */
	if (atomic_inc_return(&kdump_in_progress) != 1)
		unw_init_running(kdump_cpu_freeze, NULL);

	/*
	 * Now this cpu is ready for kdump.
	 * Stop all the others by IPI or INIT.  They could receive an INIT
	 * from outside and might be the INIT monarch, but the only thing
	 * they have to do is fall into kdump_cpu_freeze().
	 *
	 * If an INIT is asserted here:
	 * - All receivers might be slaves, since some cpus could already
	 *   be frozen and INIT might be masked on the monarch.  In that
	 *   case all slaves will be frozen soon, since kdump_in_progress
	 *   lets them into DIE_INIT_SLAVE_LEAVE.
	 * - One might be a monarch, but the INIT rendezvous will fail,
	 *   since at least this cpu already has INIT masked and so never
	 *   joins the rendezvous.  In that case all slaves and the monarch
	 *   will be frozen soon with no wait, since the INIT rendezvous is
	 *   skipped by kdump_in_progress.
	 */
	kdump_smp_send_stop();
	/* not all cpus respond to the IPI; send INIT to freeze them */
	if (kdump_wait_cpu_freeze()) {
		kdump_smp_send_init();
		/* wait again; don't go ahead if possible */
		kdump_wait_cpu_freeze();
	}
#endif
}

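/*
 * Switch into the kdump kernel: save vmcoreinfo, quiesce interrupts
 * and the IOSAPIC, then jump into the pre-loaded crash kernel image.
 */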
static void
machine_kdump_on_init(void)
{
	crash_save_vmcoreinfo();
	local_irq_disable();
	kexec_disable_iosapic();
	machine_kexec(ia64_kimage);
}

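/*
 * Per-cpu freeze handler: save this cpu's register state, record its
 * kernel stack pointer, then spin forever with MCA/INIT masked so the
 * crashing cpu can kexec safely.
 */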
void
kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
{
	int cpuid;

	local_irq_disable();
	cpuid = smp_processor_id();
	crash_save_this_cpu();
	current->thread.ksp = (__u64)info->sw - 16;

	ia64_set_psr_mc();	/* mask MCA/INIT and stop reentrance */

	atomic_inc(&kdump_cpu_frozen);
	kdump_status[cpuid] = 1;
	mb();
	for (;;)
		cpu_relax();
}

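/*
 * Die notifier, called on INIT and MCA events: freezes cpus once a
 * kdump is in progress, and starts the kdump kernel from the monarch
 * cpu on an INIT or fatal MCA, depending on the sysctl settings.
 */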
static int
kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
{
	struct ia64_mca_notify_die *nd;
	struct die_args *args = data;

	if (atomic_read(&kdump_in_progress)) {
		switch (val) {
		case DIE_INIT_MONARCH_LEAVE:
			if (!kdump_freeze_monarch)
				break;
			/* fall through */
		case DIE_INIT_SLAVE_LEAVE:
		case DIE_INIT_MONARCH_ENTER:
		case DIE_MCA_RENDZVOUS_LEAVE:
			unw_init_running(kdump_cpu_freeze, NULL);
			break;
		}
	}

	if (!kdump_on_init && !kdump_on_fatal_mca)
		return NOTIFY_DONE;

	if (!ia64_kimage) {
		if (val == DIE_INIT_MONARCH_LEAVE)
			ia64_mca_printk(KERN_NOTICE
					"%s: kdump not configured\n",
					__func__);
		return NOTIFY_DONE;
	}

	if (val != DIE_INIT_MONARCH_LEAVE &&
	    val != DIE_INIT_MONARCH_PROCESS &&
	    val != DIE_MCA_MONARCH_LEAVE)
		return NOTIFY_DONE;

	nd = (struct ia64_mca_notify_die *)args->err;

	switch (val) {
	case DIE_INIT_MONARCH_PROCESS:
		/* reason code 1 means machine check rendezvous */
		if (kdump_on_init && (nd->sos->rv_rc != 1)) {
			if (atomic_inc_return(&kdump_in_progress) != 1)
				kdump_freeze_monarch = 1;
		}
		break;
	case DIE_INIT_MONARCH_LEAVE:
		/* reason code 1 means machine check rendezvous */
		if (kdump_on_init && (nd->sos->rv_rc != 1))
			machine_kdump_on_init();
		break;
	case DIE_MCA_MONARCH_LEAVE:
		/* *(nd->data) indicates whether the MCA is recoverable */
		if (kdump_on_fatal_mca && !(*(nd->data))) {
			if (atomic_inc_return(&kdump_in_progress) == 1)
				machine_kdump_on_init();
			/* else a fatal MCA hit while kdump was already in progress */
		}
		break;
	}
	return NOTIFY_DONE;
}

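/*
 * With CONFIG_SYSCTL, kdump_on_init and kdump_on_fatal_mca can be
 * toggled at runtime via /proc/sys/kernel/kdump_on_init and
 * /proc/sys/kernel/kdump_on_fatal_mca.
 */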
#ifdef CONFIG_SYSCTL
static struct ctl_table kdump_ctl_table[] = {
	{
		.procname = "kdump_on_init",
		.data = &kdump_on_init,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "kdump_on_fatal_mca",
		.data = &kdump_on_fatal_mca,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{ }
};

static struct ctl_table sys_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = kdump_ctl_table,
	},
	{ }
};
#endif

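/*
 * Boot-time setup: register the kdump die notifier and, when
 * configured, the sysctl table.
 */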
static int
machine_crash_setup(void)
{
	/* be notified before default_monarch_init_process */
	static struct notifier_block kdump_init_notifier_nb = {
		.notifier_call = kdump_init_notifier,
		.priority = 1,
	};
	int ret;

	ret = register_die_notifier(&kdump_init_notifier_nb);
	if (ret)
		return ret;
#ifdef CONFIG_SYSCTL
	register_sysctl_table(sys_table);
#endif
	return 0;
}

__initcall(machine_crash_setup);