/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/frame.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
#include "kexec_internal.h"

DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;


/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after the
	 * panic notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * An oops in interrupt context, in the idle task (pid 0) or in
	 * the global init task, or panic_on_oops being set, all warrant
	 * switching to the crash kernel.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size is given
 * by KEXEC_CONTROL_PAGE_SIZE.  Because this memory must be identity
 * mapped during the transition from virtual to physical addresses,
 * the allocators below take care to place it where that is possible.
 *
 * The assembly stub in the control code buffer is passed a linked
 * list of descriptor pages detailing the source pages of the new
 * kernel, and the destination addresses of those source pages.  As
 * this data structure is not used in the context of the current OS,
 * it must be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens to
 * allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 */

#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > totalram_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

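/*
 * Allocate and minimally initialise a struct kimage: the entry list
 * starts out empty (entry and last_entry both point at image->head) and
 * the control, destination and unusable page lists are initialised so
 * the allocator helpers below can append to them safely.
 */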
struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

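/*
 * Return 1 if the physical range [start, end) overlaps any of the
 * destination segments recorded in @image, 0 otherwise.  Used to keep
 * control and source page allocations out of memory the new kernel
 * will be copied into.
 */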
int kimage_is_destination_range(struct kimage *image,
				unsigned long start,
				unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

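/*
 * Low-level page allocator for kexec: grab a 2^order block, mark each
 * page reserved, record the order in page_private() so kimage_free_pages()
 * can undo the allocation, and only zero the pages after
 * arch_kexec_post_alloc_pages() has had a chance to adjust their mapping.
 */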
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

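/* Free every page on @list, removing each from the list first. */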
void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						       unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory.  Since arch_kexec_protect_crashkres() is
	 * called after the kexec syscall, the copy is naturally
	 * protected from write (even read) access under the kernel
	 * direct mapping.  We still need to update it when a crash
	 * happens to generate the vmcoreinfo note, so we rely on a
	 * vmap alias for that purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

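/*
 * The entry list starts at image->head and spills over into dynamically
 * allocated indirection pages.  Each kimage_entry_t is a page-aligned
 * physical address tagged with a low-bit flag (IND_DESTINATION,
 * IND_SOURCE, IND_INDIRECTION or IND_DONE).  When the current page of
 * entries fills up, kimage_add_entry() chains to a fresh one with an
 * IND_INDIRECTION entry, giving a layout roughly like (illustrative
 * example only):
 *
 *	dest0|IND_DESTINATION, src0|IND_SOURCE, src1|IND_SOURCE, ...,
 *	next|IND_INDIRECTION  -->  srcN|IND_SOURCE, ..., IND_DONE
 */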
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

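/*
 * Walk every entry in the image list, following IND_INDIRECTION links
 * and stopping at IND_DONE.  A minimal usage sketch (ptr and entry are
 * caller-provided locals, as in kimage_dst_used() below):
 *
 *	kimage_entry_t *ptr, entry;
 *
 *	for_each_kimage_entry(image, ptr, entry)
 *		if (entry & IND_SOURCE)
 *			handle_source_page(entry & PAGE_MASK);
 *
 * handle_source_page() above is purely illustrative.
 */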
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might hit if
	 * an error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

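/*
 * Scan the entry list and return a pointer to the IND_SOURCE entry whose
 * destination address equals @page, or NULL if that destination is not
 * used yet.  Destinations are implicit: they start at the preceding
 * IND_DESTINATION entry and advance one page per IND_SOURCE entry.
 */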
static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but it keeps the
	 * implementation simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

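/*
 * Copy one segment of a normal (non-crash) image into freshly allocated
 * pages and record each page in the entry list.  The payload comes either
 * from a kernel buffer (the kexec_file_load path, image->file_mode) or
 * from user space (kexec_load); anything between bufsz and memsz is left
 * as the cleared page contents.
 */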
static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space (or the kernel buffer) to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

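/* Dispatch to the normal or crash variant based on the image type. */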
int kimage_load_segment(struct kimage *image,
			struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since it is not in a fixed area,
	 * locking the crash kernel image is the best solution.
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other.  To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		printk_safe_flush_on_panic();
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}

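/*
 * Report the size of the crashkernel reservation, taking kexec_mutex so
 * the result is consistent with a concurrent crash_shrink_memory().
 */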
size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}

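/*
 * Shrink the crashkernel reservation to @new_size bytes (typically driven
 * from the /sys/kernel/kexec_crash_size interface), returning the freed
 * tail to the page allocator and registering it back as System RAM.
 * Fails with -ENOENT once a crash kernel has been loaded.
 */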
int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

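/*
 * Save the register state of @cpu as an ELF NT_PRSTATUS note in the
 * per-cpu crash_notes buffer so the dump capture kernel can find it.
 */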
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined user space interface
	 * for saving the crashed cpu's register state,
	 * and the standard NT_PRSTATUS ELF note already
	 * provides one that existing dump tools understand.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could otherwise be allocated across two pages when
	 * the percpu allocator falls back to vmalloc.  The dump capture
	 * side accesses the buffer through its physical address, so the
	 * note must not be split across a page boundary; align it to the
	 * next power of two of its size (capped at PAGE_SIZE) to keep it
	 * inside a single page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break the compile if size is bigger than PAGE_SIZE, since
	 * crash_notes would then necessarily span two pages.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}


/*
 * Protection mechanism for the crashkernel reserved memory after the
 * kdump kernel is loaded.
 *
 * Provide empty default implementations here -- architecture
 * code may override them.
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}