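/*
 * fs/proc/vmcore.c - interface for accessing the crash dump
 *                    from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */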

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes, if any. */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump size */
static size_t vmcoredd_orig_sz;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask hypervisor if the pfn is really ram.
         * A ballooned page contains no data and reading from such a page
         * will cause high load in the hypervisor.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}
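
/*
 * Reads a page from the oldmem device from given offset; returns the
 * number of bytes read, or a negative errno on failure.
 */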
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                               offset, userbuf);
                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
                                  unsigned long from, unsigned long pfn,
                                  unsigned long size, pgprot_t prot)
{
        return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
        if (userbuf) {
                if (copy_to_user((char __user *) target, src, size))
                        return -EFAULT;
        } else {
                memcpy(target, src, size);
        }
        return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (copy_to(dst, buf, tsz, userbuf)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
                               u64 start, size_t size)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                             int userbuf)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
                if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        /* Read Elf note segment */
        if (*fpos < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the other
                 * elf notes may not fill the elf notes buffer completely and
                 * we will end up with zero-filled data between the elf notes
                 * and the device dumps. Tools will then try to decode this
                 * zero-filled data as valid notes and we don't want that. Hence,
                 * adding device dumps before the other elf notes ensure that
                 * zero-filled data can be avoided.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)*fpos, buflen);
                        start = *fpos - elfcorebuf_sz;
                        if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
                                return -EFAULT;

                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (!buflen)
                                return acc;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
                kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
                if (copy_to(buffer, kaddr, tsz, userbuf))
                        return -EFAULT;

                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - *fpos,
                                            buflen);
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
{
        return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}
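
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */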
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pgoff_t index = vmf->pgoff;
        struct page *page;
        loff_t offset;
        char *buf;
        int rc;

        page = find_or_create_page(mapping, index, GFP_KERNEL);
        if (!page)
                return VM_FAULT_OOM;
        if (!PageUptodate(page)) {
                offset = (loff_t) index << PAGE_SHIFT;
                buf = __va((page_to_pfn(page) << PAGE_SHIFT));
                rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
                if (rc < 0) {
                        unlock_page(page);
                        put_page(page);
                        return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                }
                SetPageUptodate(page);
        }
        unlock_page(page);
        vmf->page = page;
        return 0;
#else
        return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
        .fault = mmap_vmcore_fault,
};
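
/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, mmap_vmcore() is disabled, so a plain
 * vzalloc() is sufficient.
 */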
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
        return vmalloc_user(size);
#else
        return vzalloc(size);
#endif
}
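
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * contiguous user-space ELF layout.
 */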
#ifdef CONFIG_MMU
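/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */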
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
                                    unsigned long from, unsigned long pfn,
                                    unsigned long size, pgprot_t prot)
{
        unsigned long map_size;
        unsigned long pos_start, pos_end, pos;
        unsigned long zeropage_pfn = my_zero_pfn(0);
        size_t len = 0;

        pos_start = pfn;
        pos_end = pfn + (size >> PAGE_SHIFT);

        for (pos = pos_start; pos < pos_end; ++pos) {
                if (!pfn_is_ram(pos)) {
                        /*
                         * We hit a page which is not ram. Remap the continuous
                         * region between pos_start and pos-1 and replace
                         * the non-ram page at pos with the zero page.
                         */
                        if (pos > pos_start) {
                                /* Remap continuous region */
                                map_size = (pos - pos_start) << PAGE_SHIFT;
                                if (remap_oldmem_pfn_range(vma, from + len,
                                                           pos_start, map_size,
                                                           prot))
                                        goto fail;
                                len += map_size;
                        }
                        /* Remap the zero page */
                        if (remap_oldmem_pfn_range(vma, from + len,
                                                   zeropage_pfn,
                                                   PAGE_SIZE, prot))
                                goto fail;
                        len += PAGE_SIZE;
                        pos_start = pos + 1;
                }
        }
        if (pos > pos_start) {
                /* Remap the rest */
                map_size = (pos - pos_start) << PAGE_SHIFT;
                if (remap_oldmem_pfn_range(vma, from + len, pos_start,
                                           map_size, prot))
                        goto fail;
        }
        return 0;
fail:
        do_munmap(vma->vm_mm, from, len, NULL);
        return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                                   unsigned long from, unsigned long pfn,
                                   unsigned long size, pgprot_t prot)
{
        /*
         * Check if oldmem_pfn_is_ram was registered to avoid
         * looping over all pages without a reason.
         */
        if (oldmem_pfn_is_ram)
                return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        u64 start, end, len, tsz;
        struct vmcore *m;

        start = (u64)vma->vm_pgoff << PAGE_SHIFT;
        end = start + size;

        if (size > vmcore_size || end > vmcore_size)
                return -EINVAL;

        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;

        vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &vmcore_mmap_ops;

        len = 0;

        if (start < elfcorebuf_sz) {
                u64 pfn;

                tsz = min(elfcorebuf_sz - (size_t)start, size);
                pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        if (start < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the other
                 * elf notes may not fill the elf notes buffer completely and
                 * we will end up with zero-filled data between the elf notes
                 * and the device dumps. Tools will then try to decode this
                 * zero-filled data as valid notes and we don't want that. Hence,
                 * adding device dumps before the other elf notes ensure that
                 * zero-filled data can be avoided.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
                        u64 start_off;

                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)start, size);
                        start_off = start - elfcorebuf_sz;
                        if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
                                                start_off, tsz))
                                goto fail;

                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        /* leave now if filled buffer already */
                        if (!size)
                                return 0;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
                                                kaddr, tsz))
                        goto fail;

                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (start < m->offset + m->size) {
                        u64 paddr = 0;

                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - start, size);
                        paddr = m->paddr + start - m->offset;
                        if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
                                                    paddr >> PAGE_SHIFT, tsz,
                                                    vma->vm_page_prot))
                                goto fail;
                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        if (size == 0)
                                return 0;
                }
        }

        return 0;
fail:
        do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
        return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */

static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
        .llseek         = default_llseek,
        .mmap           = mmap_vmcore,
};

static struct vmcore * __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                           struct list_head *vc_list)
{
        u64 size;
        struct vmcore *m;

        size = elfsz + elfnotesegsz;
        list_for_each_entry(m, vc_list, list) {
                size += m->size;
        }
        return size;
}
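
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */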
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf64_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0)
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
        }

        return 0;
}
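
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */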
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf64_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}
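
/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */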
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        rc = update_note_header_size_elf64(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf64_Ehdr) +
                (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes.  We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
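
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */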
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf32_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0)
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
        }

        return 0;
}
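
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */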
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf32_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}
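
/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */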
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf32_Ehdr) +
                (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header. */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes.  We need this to update the note
         * header when the device dumps will be added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}

/* Processes PT_LOAD program headers and builds the vmcore chunk list. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                       size_t elfsz,
                                                       size_t elfnotes_sz,
                                                       struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                       size_t elfsz,
                                                       size_t elfnotes_sz,
                                                       struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                    struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}
static int __init parse_crash_elf64_headers(void)
{
        int rc = 0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic Verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
            (ehdr.e_type != ET_CORE) ||
            !vmcore_elf64_check_arch(&ehdr) ||
            ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
            ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
            ehdr.e_version != EV_CURRENT ||
            ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
            ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
            ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}
static int __init parse_crash_elf32_headers(void)
{
        int rc = 0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic Verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
            (ehdr.e_type != ET_CORE) ||
            !vmcore_elf32_check_arch(&ehdr) ||
            ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
            ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
            ehdr.e_version != EV_CURRENT ||
            ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
            ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
            ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}
static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc = 0;

        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Determine vmcore size. */
        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);

        return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
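/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */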
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
                                  u32 size)
{
        struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

        vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
        vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
        vdd_hdr->n_type = NT_VMCOREDD;

        strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
                sizeof(vdd_hdr->name));
        memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
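
/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */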
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
                                            size_t vmcoreddsz)
{
        unsigned char *e_ident = (unsigned char *)elfptr;
        u64 start, end, size;
        loff_t vmcore_off;
        u32 i;

        vmcore_off = elfcorebuf_sz + elfnotesz;

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
                Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        } else {
                Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
                Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        }
}
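
/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Also update the offsets and sizes of all the program headers
 * after the device dumps.
 */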
static void vmcoredd_update_size(size_t dump_size)
{
        vmcoredd_orig_sz += dump_size;
        elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
        vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
                                        vmcoredd_orig_sz);

        /* Update vmcore list offsets */
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);
        proc_vmcore->size = vmcore_size;
}
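
/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 *
 * A calling driver would, roughly, fill in a struct vmcoredd_data with its
 * dump name, size and collect callback before calling this function. A
 * minimal sketch (names marked [hypothetical] are illustrative, not taken
 * from any real driver):
 *
 *	strlcpy(data->dump_name, "mydev", sizeof(data->dump_name));
 *	data->size = my_dump_len;			[hypothetical]
 *	data->vmcoredd_callback = my_collect_fn;	[hypothetical]
 *	vmcore_add_device_dump(data);
 */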
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
        struct vmcoredd_node *dump;
        void *buf = NULL;
        size_t data_size;
        int ret;

        if (!data || !strlen(data->dump_name) ||
            !data->vmcoredd_callback || !data->size)
                return -EINVAL;

        dump = vzalloc(sizeof(*dump));
        if (!dump) {
                ret = -ENOMEM;
                goto out_err;
        }

        /* Keep size of the buffer page aligned so that it can be mmaped */
        data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
                            PAGE_SIZE);

        /* Allocate buffer for driver's to write their dumps */
        buf = vmcore_alloc_buf(data_size);
        if (!buf) {
                ret = -ENOMEM;
                goto out_err;
        }

        vmcoredd_write_header(buf, data, data_size -
                              sizeof(struct vmcoredd_header));

        /* Invoke the driver's dump collection routine */
        ret = data->vmcoredd_callback(data, buf +
                                      sizeof(struct vmcoredd_header));
        if (ret)
                goto out_err;

        dump->buf = buf;
        dump->size = data_size;

        /* Add the dump to the global device dump list */
        mutex_lock(&vmcoredd_mutex);
        list_add_tail(&dump->list, &vmcoredd_list);
        mutex_unlock(&vmcoredd_mutex);

        vmcoredd_update_size(data_size);
        return 0;

out_err:
        /* vfree() tolerates NULL, so no need to check the pointers */
        vfree(buf);
        vfree(dump);

        return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
        mutex_lock(&vmcoredd_mutex);
        while (!list_empty(&vmcoredd_list)) {
                struct vmcoredd_node *dump;

                dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
                                        list);
                list_del(&dump->list);
                vfree(dump->buf);
                vfree(dump);
        }
        mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* Allow architectures to allocate ELF header in 2nd kernel */
        rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
        if (rc)
                return rc;
        /*
         * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
         * then capture the dump.
         */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }
        elfcorehdr_free(elfcorehdr_addr);
        elfcorehdr_addr = ELFCORE_ADDR_ERR;

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        while (!list_empty(&vmcore_list)) {
                struct vmcore *m;

                m = list_first_entry(&vmcore_list, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();

        /* clear vmcoredd list */
        vmcore_free_device_dumps();
}