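/*
 * Interface for accessing the crash dump of the previous kernel
 * ("oldmem") through /proc/vmcore after a kdump reboot.
 */
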
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

/* Buffer holding the merged ELF note segment, in vmalloc memory. */
static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

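/*
 * A driver (such as a hypervisor balloon driver) may register a callback
 * that reports whether an oldmem pfn is actually backed by RAM; pages
 * reported as non-RAM are read back as zeroes instead of being copied
 * from the old kernel.
 */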
int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask the registered callback (if any) whether the pfn is really
         * ram; a ballooned page contains no data, so reading it from the
         * old kernel would only cause needless work.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}

/*
 * Copy @count bytes of the old kernel's memory starting at *@ppos into
 * @buf, page by page; pages that are not RAM are returned as zeroes so
 * sparse dumps stay sparse.
 */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                               offset, userbuf);
                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}

/*
 * Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
                if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        /* Read ELF note segment */
        if (*fpos < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
                kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
                if (copy_to_user(buffer, kaddr, tsz))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        /* Read the memory chunks of the old kernel. */
        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start, 1);
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}

/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
        return vmalloc_user(notes_sz);
#else
        return vzalloc(notes_sz);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. An MMU is
 * essentially required to do the mapping; the mmap interface is also
 * not provided on s390.
 */
#if defined(CONFIG_MMU) && !defined(CONFIG_S390)
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        u64 start, end, len, tsz;
        struct vmcore *m;

        start = (u64)vma->vm_pgoff << PAGE_SHIFT;
        end = start + size;

        if (size > vmcore_size || end > vmcore_size)
                return -EINVAL;

        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;

        vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_flags |= VM_MIXEDMAP;

        len = 0;

        if (start < elfcorebuf_sz) {
                u64 pfn;

                tsz = min(elfcorebuf_sz - (size_t)start, size);
                pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        if (start < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
                                                kaddr, tsz))
                        goto fail;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (start < m->offset + m->size) {
                        u64 paddr = 0;

                        tsz = min_t(size_t, m->offset + m->size - start, size);
                        paddr = m->paddr + start - m->offset;
                        if (remap_pfn_range(vma, vma->vm_start + len,
                                            paddr >> PAGE_SHIFT, tsz,
                                            vma->vm_page_prot))
                                goto fail;
                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        if (size == 0)
                                return 0;
                }
        }

        return 0;
fail:
        do_munmap(vma->vm_mm, vma->vm_start, len);
        return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif

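/*
 * File operations for /proc/vmcore: user space (a dump tool such as
 * makedumpfile, or plain cp) reads or mmaps the file to save the dump.
 */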
static const struct file_operations proc_vmcore_operations = {
        .read   = read_vmcore,
        .llseek = default_llseek,
        .mmap   = mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

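/*
 * Total size of the vmcore file: merged ELF headers, the ELF note
 * segment, and all memory chunk data.
 */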
static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                                  struct list_head *vc_list)
{
        u64 size;
        struct vmcore *m;

        size = elfsz + elfnotesegsz;
        list_for_each_entry(m, vc_list, list) {
                size += m->size;
        }
        return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (real_sz < max_sz) {
                        if (nhdr_ptr->n_namesz == 0)
                                break;
                        sz = sizeof(Elf64_Nhdr) +
                                ((nhdr_ptr->n_namesz + 3) & ~3) +
                                ((nhdr_ptr->n_descsz + 3) & ~3);
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
        }

        return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segments
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its p_memsz member.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and hold the actual ELF note segment size in their p_memsz members.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf64_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function copies the ELF note segments of the first (crashed)
 * kernel into the buffer @notes_buf of the second (kdump) kernel. It is
 * assumed that @notes_buf is at least as large as the combined size of
 * all note segments.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and hold the actual ELF note segment size in their p_memsz members.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        rc = update_note_header_size_elf64(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = alloc_elfnotes_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type   = PT_NOTE;
        phdr.p_flags  = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr  = phdr.p_paddr = 0;
        phdr.p_filesz = phdr.p_memsz = phdr_sz;
        phdr.p_align  = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (real_sz < max_sz) {
                        if (nhdr_ptr->n_namesz == 0)
                                break;
                        sz = sizeof(Elf32_Nhdr) +
                                ((nhdr_ptr->n_namesz + 3) & ~3) +
                                ((nhdr_ptr->n_descsz + 3) & ~3);
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
        }

        return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segments
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its p_memsz member.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and hold the actual ELF note segment size in their p_memsz members.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf32_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function copies the ELF note segments of the first (crashed)
 * kernel into the buffer @notes_buf of the second (kdump) kernel. It is
 * assumed that @notes_buf is at least as large as the combined size of
 * all note segments.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and hold the actual ELF note segment size in their p_memsz members.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = alloc_elfnotes_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type   = PT_NOTE;
        phdr.p_flags  = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr  = phdr.p_paddr = 0;
        phdr.p_filesz = phdr.p_memsz = phdr_sz;
        phdr.p_align  = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}

/*
 * Register the PT_LOAD regions of the crashed kernel in vmcore_list and
 * rewrite each program header's p_offset to the region's position in the
 * /proc/vmcore file.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                       size_t elfsz,
                                                       size_t elfnotes_sz,
                                                       struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

        /* Skip ELF header, program headers and ELF note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}

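/* Same as process_ptload_program_headers_elf64(), for ELF32 dumps. */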
static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                       size_t elfsz,
                                                       size_t elfnotes_sz,
                                                       struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

        /* Skip ELF header, program headers and ELF note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                           struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip ELF header, program headers and ELF note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

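/* Free the buffers holding the merged ELF headers and the ELF note segment. */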
static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}

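/*
 * Build the merged ELF64 view of the dump: read the crashed kernel's ELF
 * header and program headers, merge the PT_NOTE entries into a single
 * note segment, and record the PT_LOAD chunks in vmcore_list.
 */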
static int __init parse_crash_elf64_headers(void)
{
        int rc = 0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read ELF header */
        rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all ELF headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

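/* Same as parse_crash_elf64_headers(), for ELF32 dump headers. */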
static int __init parse_crash_elf32_headers(void)
{
        int rc = 0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read ELF header */
        rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !elf_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all ELF headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

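/* Detect the ELF class of the dump header and parse it accordingly. */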
static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc = 0;

        addr = elfcorehdr_addr;
        rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Determine vmcore size. */
        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);

        return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* Capture the dump only if a valid ELF core header address was
         * passed on the command line (elfcorehdr=).
         */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
module_init(vmcore_init)

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        struct list_head *pos, *next;

        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* Clear the vmcore list. */
        list_for_each_safe(pos, next, &vmcore_list) {
                struct vmcore *m;

                m = list_entry(pos, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);