/*
 * fs/proc/vmcore.c - interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes, if any. */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;
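/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The registered callback has to take care of module refcounting.
 */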
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}
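/* Reads a page from the oldmem device from given offset. */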
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf,
				bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);

			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, sme_active());
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user address space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
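/*
 * Copy @size bytes of device dump data, starting at offset @start into
 * the merged device dump region, to the destination buffer @dst.
 */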
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
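/*
 * Map @size bytes of device dump data, starting at offset @start into
 * the merged device dump region, into the user mapping at @dst.
 */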
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
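/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */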
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, sme_active());
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}
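/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */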
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};
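/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */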
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}
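/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * contiguous user-space ELF file.
 */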
#ifdef CONFIG_MMU
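/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range() replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */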
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmaped at page aligned
		 * address.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

static const struct file_operations proc_vmcore_operations = {
	.read = read_vmcore,
	.llseek = default_llseek,
	.mmap = mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}
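/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */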
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}
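/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */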
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
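/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment sizes; real ELF note segment sizes are the
 * ones returned if they have been updated by
 * update_note_header_size_elf64.
 */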
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
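/* Merges all the PT_NOTE headers into one. */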
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
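/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */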
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}
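/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */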
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
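/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment sizes; real ELF note segment sizes are the
 * ones returned if they have been updated by
 * update_note_header_size_elf32.
 */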
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
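/* Merges all the PT_NOTE headers into one. */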
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
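/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers.
 */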
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
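/* Sets offset fields of vmcore elements. */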
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc=0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc=0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc=0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
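/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */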
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
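/**
 * vmcoredd_update_program_headers - Update all ELF program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of ELF header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */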
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}
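/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * ELF header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the ELF program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */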
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}
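/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write ELF note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */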
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	if (buf)
		vfree(buf);

	if (dump)
		vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
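/* Free all dumps in vmcore device dump list */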
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}
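/* Init function for vmcore module. */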
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);
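/* Cleanup function for vmcore module. */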
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}