// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
#include <asm/io.h>
#include "internal.h"
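
/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */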
static LIST_HEAD(vmcore_list);

/* Buffer holding the kernel ELF core headers read from the old memory. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes, if device dumps are present */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device dump list and mutex to synchronize access to the list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Total size of all device dumps */
static size_t vmcoredd_orig_sz;
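
/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The registered callback has to take care of module refcounting.
 */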
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}
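
/* Reads a page from the oldmem device from given offset. */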
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);

			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
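
/*
 * Architectures may override this function to allocate the ELF header
 * in the 2nd (kdump) kernel.
 */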
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}
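
/*
 * Architectures may override this function to free the ELF header.
 */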
void __weak elfcorehdr_free(unsigned long long addr)
{}
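
/*
 * Architectures may override this function to read from the ELF header.
 */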
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}
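
/*
 * Architectures may override this function to read from note sections.
 */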
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
}
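
/*
 * Architectures may override this function to map oldmem.
 */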
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}
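
/*
 * Architectures which support memory encryption override this.
 */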
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}
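
/*
 * Copy to either kernel or user address space.
 */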
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
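
/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned, otherwise the number of bytes read is returned.
 */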
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, mem_encrypt_active());
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}
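
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */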
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};
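
/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */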
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}
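
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */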
#ifdef CONFIG_MMU
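
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range(), replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */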
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if a callback was registered to avoid looping over all
	 * pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmapped at page aligned
		 * address.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

static const struct proc_ops vmcore_proc_ops = {
	.proc_read	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};

static struct vmcore * __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}
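
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */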
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
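
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr are unique and that the physical memory regions covered by
 * them do not overlap.
 */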
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
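
/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * the size of buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 */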
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
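
/* Merges all the PT_NOTE headers into one. */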
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
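
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */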
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
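
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr are unique and that the physical memory regions covered by
 * them do not overlap.
 */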
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
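
/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * the size of buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 */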
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
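
/* Merges all the PT_NOTE headers into one. */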
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
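
/* Add memory chunks represented by program headers */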
static int __init process_ptload_program_headers_elf64(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
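
/* Sets offset fields of vmcore elements. */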
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
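/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */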
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
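
/**
 * vmcoredd_update_program_headers - Update all ELF program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of ELF header (Elf64 or Elf32) and update program
 * headers accordingly.
 */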
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}
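
/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * ELF header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the ELF program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */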
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}
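
/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write an ELF note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to the global list.
 */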
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to the device dump list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
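
/* Free all dumps in vmcore device dump list */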
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}
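
/* Init function for vmcore module. */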
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);
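
/* Cleanup function for vmcore module. */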
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* Clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* Clear vmcoredd device dumps */
	vmcore_free_device_dumps();
}