// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"
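
/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */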
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;
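
/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */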
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}
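
/* Reads a page from the oldmem device from given offset. */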
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);

			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user address space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
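
/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */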
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, mem_encrypt_active());
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}
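
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */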
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};
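
/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */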
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}
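
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * contiguous user-space address space.
 */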
#ifdef CONFIG_MMU
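/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range() replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */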
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmaped at page aligned
		 * address.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};

static struct vmcore * __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}
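
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */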
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
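
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and so each of them has the actual ELF note segment size in its
 * p_memsz member.
 */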
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
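
/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * the size of @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and so each of them has the actual ELF note segment size in its
 * p_memsz member.
 */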
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type   = PT_NOTE;
	phdr.p_flags  = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr  = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align  = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
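
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */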
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
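
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and so each of them has the actual ELF note segment size in its
 * p_memsz member.
 */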
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
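
/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * the size of @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and so each of them has the actual ELF note segment size in its
 * p_memsz member.
 */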
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type   = PT_NOTE;
	phdr.p_flags  = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr  = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align  = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
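
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the offset in program headers corresponding to them.
 */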
static int __init process_ptload_program_headers_elf64(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf64_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf32_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
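/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */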
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
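
/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size
 * in PT_NOTE entry. Also update the offset of all the program headers after
 * the elf note header.
 */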
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}
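
/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */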
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}
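
/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf headers to the buffer and then add the dump to global list.
 */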
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	if (buf)
		vfree(buf);

	if (dump)
		vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allocate buffer for ELF core header on crash dump kernel. */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!is_vmcore_usable())
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcoredd entries */
	vmcore_free_device_dumps();
}