/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 * 				 dump from the system's previous life.
 * 	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/moduleparam.h>
#include <linux/mem_encrypt.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
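
/*
 * Usage sketch (not part of this file): a paravirtualized balloon driver
 * can tell /proc/vmcore which pfns are not backed by real memory, so that
 * reading them does not cause load in the hypervisor. The Xen balloon
 * driver registers such a callback; the names below are illustrative only.
 *
 *	static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		return example_pfn_is_backed(pfn) ? 1 : 0;
 *	}
 *
 *	register_oldmem_pfn_is_ram(&example_oldmem_pfn_is_ram);
 *	...
 *	unregister_oldmem_pfn_is_ram();
 */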

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Reads a page from the oldmem device from given offset. */
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);

			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
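
/*
 * Worked example of the offset arithmetic above (assuming PAGE_SIZE == 4096):
 * a read of 8192 bytes at *ppos == 0x1234 starts at pfn 1 with offset 0x234.
 * The loop copies 4096 - 0x234 = 3532 bytes from pfn 1, a full 4096 bytes
 * from pfn 2, and the remaining 564 bytes from pfn 3; offset is zero for
 * every page after the first.
 */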

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user address space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}
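
/*
 * Example (illustrative only): copy_to(ubuf, kbuf, len, 1) performs a
 * copy_to_user() for a read(2) caller, while copy_to(kpage, kbuf, len, 0)
 * is a plain memcpy() for the kernel-buffer path used by the s390 fault
 * handler below.
 */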

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensure that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, mem_encrypt_active());
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * contiguous user-space ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}
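
/*
 * Worked example: mapping pfns 10..15 where only pfn 12 is reported as
 * non-ram remaps [10,11] as one contiguous range, the zero page in place
 * of 12, and [13,15] as the trailing range, so the user mapping stays
 * linear while never touching the ballooned page.
 */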

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensure that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = min_t(size_t, m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}
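
/*
 * Resulting /proc/vmcore layout (each boundary page-aligned):
 *
 *	+---------------------------------------------------+
 *	| ELF header + merged program headers (elfcorebuf)   |
 *	+---------------------------------------------------+
 *	| device dump notes (optional) + merged ELF notes    |
 *	+---------------------------------------------------+
 *	| memory chunks from the vmcore_list (PT_LOAD data)  |
 *	+---------------------------------------------------+
 *
 * get_vmcore_size() simply sums these three regions.
 */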

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
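
/*
 * Worked example of the 4-byte alignment above: an NT_PRSTATUS note named
 * "CORE" (n_namesz == 5; on x86_64 n_descsz == 336) occupies
 * sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4) = 12 + 8 + 336 = 356
 * bytes, and the next Elf64_Nhdr starts immediately after it.
 */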

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and so real size of note segment are returned by p_memsz.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and so real size of note segment are returned by p_memsz.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
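
/*
 * Sketch of the merge performed above: a crash header with, say, one
 * PT_NOTE per CPU plus one PT_LOAD per memory range
 *
 *	Ehdr | NOTE(cpu0) | NOTE(cpu1) | ... | LOAD | LOAD
 *
 * becomes
 *
 *	Ehdr | NOTE(merged) | LOAD | LOAD
 *
 * with e_phnum reduced by nr_ptnote - 1 and the note data gathered in
 * the separate, page-aligned elfnotes_buf.
 */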

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and so real size of note segment are returned by p_memsz.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and so real size of note segment are returned by p_memsz.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
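
/*
 * Worked example: a PT_LOAD entry whose old p_offset (interpreted here as
 * the physical start address, which is what the kexec-prepared crash header
 * stores there) is 0x100234 with p_memsz 0x1000 gets start = 0x100000,
 * end = 0x102000 and size = 0x2000; the chunk [0x100000, 0x102000) joins
 * the vmcore list and p_offset is rewritten to vmcore_off + 0x234 so that
 * readers still land on the original, unaligned physical address.
 */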

static int __init process_ptload_program_headers_elf32(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf64_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !elf_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
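
/*
 * Resulting note layout, as consumed by crash-analysis tools:
 *
 *	n_namesz | n_descsz | n_type (NT_VMCOREDD) | name (VMCOREDD_NOTE_NAME)
 *	| dump_name | <size bytes of device dump data>
 *
 * n_descsz covers both the embedded dump_name and the raw dump data that
 * the driver callback writes after this header.
 */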

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update program
 * headers accordingly.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for driver's to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);	/* vfree() tolerates NULL */
	vfree(dump);
	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
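
/*
 * Usage sketch (illustrative, not from this file): a driver that wants its
 * hardware state captured in /proc/vmcore fills in a struct vmcoredd_data
 * and registers it from its crash/recovery path. The cxgb4 driver uses this
 * interface in-tree; the names below are made up.
 *
 *	static int example_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		return example_snapshot_hw(buf, data->size);
 *	}
 *
 *	struct vmcoredd_data data = { .size = EXAMPLE_DUMP_SIZE,
 *				      .vmcoredd_callback = example_collect };
 *	strlcpy(data.dump_name, "EXAMPLE_HW", sizeof(data.dump_name));
 *	vmcore_add_device_dump(&data);
 */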
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcoredd entries */
	vmcore_free_device_dumps();
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);