/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/*
 * This primarily represents the number of split ranges due to exclusion
 * of the crashkernel region. It needs to be large enough.
 */
#define CRASH_MAX_RANGES	16

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
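
/*
 * Usage note (illustration, not part of the original file): a hypervisor
 * module such as kvm_intel is expected to publish its callback with
 * rcu_assign_pointer(crash_vmclear_loaded_vmcss, cb) and, on unload,
 * reset the pointer to NULL followed by synchronize_rcu(), which keeps
 * the rcu_read_lock() window above safe.
 */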

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel runs on another CPU.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

static void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

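/*
 * Everything below implements the kexec_file_load() side of kdump:
 * sizing and building the ELF core headers that the capture kernel
 * exposes as /proc/vmcore, and the e820 memory map handed to it.
 */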
#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
				get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}
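
/*
 * Worked example (hypothetical numbers, not from the original source):
 * if the RAM walk in fill_up_crash_elf_data() finds four "System RAM"
 * ranges and crashk_low_res is populated, max_nr_ranges ends up as
 * 4 + 1 + 1 = 6, leaving headroom for the splits exclude_mem_range()
 * can introduce.
 */
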
224
225static int exclude_mem_range(struct crash_mem *mem,
226 unsigned long long mstart, unsigned long long mend)
227{
228 int i, j;
229 unsigned long long start, end;
230 struct crash_mem_range temp_range = {0, 0};
231
232 for (i = 0; i < mem->nr_ranges; i++) {
233 start = mem->ranges[i].start;
234 end = mem->ranges[i].end;
235
236 if (mstart > end || mend < start)
237 continue;
238
239
240 if (mstart < start)
241 mstart = start;
242 if (mend > end)
243 mend = end;
244
245
246 if (mstart == start && mend == end) {
247 mem->ranges[i].start = 0;
248 mem->ranges[i].end = 0;
249 if (i < mem->nr_ranges - 1) {
250
251 for (j = i; j < mem->nr_ranges - 1; j++) {
252 mem->ranges[j].start =
253 mem->ranges[j+1].start;
254 mem->ranges[j].end =
255 mem->ranges[j+1].end;
256 }
257 }
258 mem->nr_ranges--;
259 return 0;
260 }
261
262 if (mstart > start && mend < end) {
263
264 mem->ranges[i].end = mstart - 1;
265 temp_range.start = mend + 1;
266 temp_range.end = end;
267 } else if (mstart != start)
268 mem->ranges[i].end = mstart - 1;
269 else
270 mem->ranges[i].start = mend + 1;
271 break;
272 }
273
274
275 if (!temp_range.end)
276 return 0;
277
278
279 if (i == CRASH_MAX_RANGES - 1) {
280 pr_err("Too many crash ranges after split\n");
281 return -ENOMEM;
282 }
283
284
285 j = i + 1;
286 if (j < mem->nr_ranges) {
287
288 for (i = mem->nr_ranges - 1; i >= j; i--)
289 mem->ranges[i + 1] = mem->ranges[i];
290 }
291
292 mem->ranges[j].start = temp_range.start;
293 mem->ranges[j].end = temp_range.end;
294 mem->nr_ranges++;
295 return 0;
296}
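
/*
 * Worked example (hypothetical values): excluding [0x100, 0x1ff] from a
 * lone range [0x0, 0xfff] takes the split path above: the range shrinks
 * to [0x0, 0xff], temp_range = [0x200, 0xfff] is slotted in behind it,
 * and nr_ranges goes from 1 to 2.
 */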

/*
 * Start with the range [mstart, mend] and carve out the crash kernel
 * reservation(s); whatever survives (possibly split) is left in ced->mem.
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	/* Exclude crashk_low_res region, if reserved */
	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches the backup region, adjust the offset
		 * to point at the backup segment instead.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}

static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
			prepare_elf64_ram_headers_callback);
	if (ret < 0)
		return ret;

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
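
/*
 * Resulting layout of the buffer built above (sketch):
 *
 *   Elf64_Ehdr                ET_CORE, e_phnum incremented as we go
 *   Elf64_Phdr  PT_NOTE (xN)  one crash_notes buffer per present cpu
 *   Elf64_Phdr  PT_NOTE       vmcoreinfo note
 *   Elf64_Phdr  PT_LOAD       kernel text mapping (x86_64 only)
 *   Elf64_Phdr  PT_LOAD (xM)  System RAM chunks, minus crash regions
 *
 * The capture kernel exposes this as /proc/vmcore.
 */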

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820MAX)
		return 1;

	memcpy(&params->e820_map[nr_e820_entries], entry,
			sizeof(struct e820entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}
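
/*
 * Both excluded segments are placed inside crashk_res, so the typical
 * call on [crashk_res.start, crashk_res.end] leaves at most three RAM
 * sub-ranges: before the backup segment, between the two segments, and
 * after the elf headers.
 */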

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
						crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
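
/*
 * Net effect: the capture kernel's e820 advertises as RAM only the
 * backup region, the optional low reservation, and the usable pieces
 * of crashk_res, so the second kernel never scribbles over the memory
 * it is meant to dump.
 */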

static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only the first range; returning 1 stops the walk */
	return 1;
}

int crash_load_segments(struct kimage *image)
{
	unsigned long src_start, src_sz, elf_sz;
	void *elf_addr;
	int ret;

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	src_start = image->arch.backup_src_start;
	src_sz = image->arch.backup_src_sz;

	/* Add backup segment. */
	if (src_sz) {
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero-filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
				       sizeof(crash_zero_bytes), src_sz,
				       PAGE_SIZE, 0, -1, 0,
				       &image->arch.backup_load_addr);
		if (ret)
			return ret;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr, src_start, src_sz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
	if (ret)
		return ret;

	image->arch.elf_headers = elf_addr;
	image->arch.elf_headers_sz = elf_sz;

	ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
			       ELF_CORE_HEADER_ALIGN, 0, -1, 0,
			       &image->arch.elf_load_addr);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, elf_sz, elf_sz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */