1
2
3
4
5
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9#include <linux/capability.h>
10#include <linux/mm.h>
11#include <linux/file.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/kexec.h>
15#include <linux/mutex.h>
16#include <linux/list.h>
17#include <linux/highmem.h>
18#include <linux/syscalls.h>
19#include <linux/reboot.h>
20#include <linux/ioport.h>
21#include <linux/hardirq.h>
22#include <linux/elf.h>
23#include <linux/elfcore.h>
24#include <linux/utsname.h>
25#include <linux/numa.h>
26#include <linux/suspend.h>
27#include <linux/device.h>
28#include <linux/freezer.h>
29#include <linux/panic_notifier.h>
30#include <linux/pm.h>
31#include <linux/cpu.h>
32#include <linux/uaccess.h>
33#include <linux/io.h>
34#include <linux/console.h>
35#include <linux/vmalloc.h>
36#include <linux/swap.h>
37#include <linux/syscore_ops.h>
38#include <linux/compiler.h>
39#include <linux/hugetlb.h>
40#include <linux/objtool.h>
41#include <linux/kmsg_dump.h>
42
43#include <asm/page.h>
44#include <asm/sections.h>
45
46#include <crypto/hash.h>
47#include "kexec_internal.h"
48
/* Serializes kexec image load/unload/shrink and the kexec reboot itself. */
DEFINE_MUTEX(kexec_mutex);

/* Per-cpu memory for storing cpu register states in case of a system crash. */
note_buf_t __percpu *crash_notes;

/* Flag set while a kexec reboot is in progress (see kernel_kexec()). */
bool kexec_in_progress = false;
58
/*
 * Location of the reserved region for the crash (capture) kernel.
 * start/end are filled in by architecture setup code at boot time;
 * while unreserved both stay 0.
 */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
/* Optional low-memory portion of the crash kernel reservation. */
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
73
74int kexec_should_crash(struct task_struct *p)
75{
76
77
78
79
80
81 if (crash_kexec_post_notifiers)
82 return 0;
83
84
85
86
87 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
88 return 1;
89 return 0;
90}
91
92int kexec_crash_loaded(void)
93{
94 return !!kexec_crash_image;
95}
96EXPORT_SYMBOL_GPL(kexec_crash_loaded);
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
/* Marker passed to kimage_alloc_page(): the page has no fixed destination. */
#define KIMAGE_NO_DEST (-1UL)
/* Number of pages needed to hold @x bytes, rounding up. */
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);
148
/*
 * sanity_check_segment_list - validate an image's destination layout.
 *
 * Checks, in order: every segment is page aligned and below the
 * architecture's destination limit; no two segments overlap; no segment
 * claims more source bytes than destination bytes (bufsz <= memsz); the
 * image does not consume more than half of system RAM; and, for crash
 * images, every segment lies entirely within the reserved crashk_res
 * region.
 *
 * Returns 0 on success, -EADDRNOTAVAIL or -EINVAL on a bad layout.
 */
int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load the new
	 * image into invalid or reserved areas of RAM.  This just
	 * verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure the
	 * destination addresses are page aligned.  Too many special cases
	 * crop up when we don't do this which would get messy fast.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/*
	 * Verify our destination addresses do not overlap.  If we allowed
	 * overlapping destination addresses through, very weird things
	 * could happen with no easy explanation as one segment stomps on
	 * another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments overlap? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/*
	 * Ensure our buffer sizes are strictly less than our memory
	 * sizes.  This should always be the case, and it is easier to
	 * check up front than to be surprised later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed.  If
	 * the request from userspace is too large, a large amount of time
	 * will be wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses.  Normally the
	 * firmware is only able to place the kernel image in a fixed
	 * location, so all of the segments of a crash kernel must land in
	 * the memory that has been reserved for it (crashk_res),
	 * otherwise loading the capture kernel could corrupt the running
	 * kernel.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}
254
/*
 * do_kimage_alloc_init - allocate and minimally initialise a kimage.
 *
 * Returns a zeroed image whose entry pointer refers to the inline head
 * entry, with all page-tracking lists initialised, or NULL on
 * allocation failure.  The caller owns the image and releases it with
 * kimage_free().
 */
struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}
281
282int kimage_is_destination_range(struct kimage *image,
283 unsigned long start,
284 unsigned long end)
285{
286 unsigned long i;
287
288 for (i = 0; i < image->nr_segments; i++) {
289 unsigned long mstart, mend;
290
291 mstart = image->segment[i].mem;
292 mend = mstart + image->segment[i].memsz;
293 if ((end > mstart) && (start < mend))
294 return 1;
295 }
296
297 return 0;
298}
299
/*
 * kimage_alloc_pages - allocate 2^order pages for a kexec image.
 *
 * The pages are marked Reserved so they are skipped by memory
 * management, and the arch post-alloc hook is run before any zeroing so
 * that the clearing goes through whatever mapping the architecture set
 * up (__GFP_ZERO is stripped from the allocation itself for the same
 * reason).  Returns NULL on failure or when a fatal signal is pending.
 */
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		/* Remember the order for kimage_free_pages(). */
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		/* Zero only after the arch hook has prepared the pages. */
		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}
326
/*
 * kimage_free_pages - undo kimage_alloc_pages() for one allocation.
 *
 * The allocation order was stashed in page_private() by
 * kimage_alloc_pages(); clear the Reserved marks, run the arch pre-free
 * hook and return the pages to the buddy allocator.
 */
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}
340
/* Free every page allocation linked (via page->lru) on @list. */
void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}
350
/*
 * kimage_alloc_normal_control_pages - allocate control pages for a
 * normal (non-crash) kexec image.
 *
 * Control pages are special: they are the intermediaries needed while
 * the image pages are copied to their final resting place, so they must
 * not conflict with either the destination addresses of the image or
 * with memory the kernel is already using.
 *
 * The strategy is to allocate pages until we get one that is not a
 * destination address; allocations that did conflict are parked on a
 * local list so the allocator cannot hand them back, and are released
 * in one sweep once a usable allocation is found.
 *
 * Returns the control page allocation (tracked on
 * image->control_pages so kimage_free() can reclaim it), or NULL if
 * memory is exhausted first.
 */
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/*
	 * Loop while I can allocate a page and the page allocated is a
	 * destination page (or above the control memory limit).
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn = page_to_boot_pfn(pages);
		epfn = pfn + count;
		addr = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
		    kimage_is_destination_range(image, addr, eaddr)) {
			/* Unusable: park it and try again. */
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/*
		 * Because the page is already in its destination location
		 * we will never allocate another page at that address.
		 * Therefore kimage_alloc_pages will not return it (again)
		 * and we don't need to give it an entry in image->segment[].
		 */
	}

	/*
	 * Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single page
	 * allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
416
/*
 * kimage_alloc_crash_control_pages - allocate control pages for a
 * crash kexec image.
 *
 * Control pages for a crash kernel must come from the memory reserved
 * in crashk_res: that region is guaranteed not to be in use by the
 * running kernel, and the capture kernel's segments are already known
 * to live inside it (see sanity_check_segment_list()).  So instead of
 * going to the page allocator, scan the reserved region for a
 * size-aligned hole that does not overlap any segment.
 *
 * image->control_page records how far the scan has progressed so that
 * successive calls hand out distinct holes.  The returned pages are
 * not placed on any list; they are part of the static reservation and
 * are never freed back to the allocator.
 *
 * Returns the first page of the hole, or NULL if no suitable hole
 * exists below KEXEC_CRASH_CONTROL_MEMORY_LIMIT.
 */
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	/* Start at the next size-aligned address after the previous hole. */
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}
482
483
484struct page *kimage_alloc_control_pages(struct kimage *image,
485 unsigned int order)
486{
487 struct page *pages = NULL;
488
489 switch (image->type) {
490 case KEXEC_TYPE_DEFAULT:
491 pages = kimage_alloc_normal_control_pages(image, order);
492 break;
493 case KEXEC_TYPE_CRASH:
494 pages = kimage_alloc_crash_control_pages(image, order);
495 break;
496 }
497
498 return pages;
499}
500
/*
 * kimage_crash_copy_vmcoreinfo - set up a safe copy of vmcoreinfo for
 * a crash image.
 *
 * For crash images, grab a control page inside the crashkernel
 * reservation and map it; crash_update_vmcoreinfo_safecopy() will keep
 * a copy of vmcoreinfo there so the capture kernel sees consistent
 * data even if the original is corrupted at crash time.  A no-op for
 * non-crash images.  Returns 0 on success or -ENOMEM.
 */
int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For a kdump kernel, access to the vmcoreinfo note saved in the
	 * first kernel may not be safe after a crash: the first kernel
	 * may have scribbled over it before dying.  So make a copy of the
	 * note inside the crashkernel reservation, which the first kernel
	 * never touches after the crash kernel is loaded, and update the
	 * copy whenever vmcoreinfo changes.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}
534
/*
 * kimage_add_entry - append one entry to the image's kexec page list.
 *
 * The list is a chain of pages of kimage_entry_t values.  When the
 * current page is full, a fresh indirection page is allocated, the
 * final slot of the old page is pointed at it (IND_INDIRECTION) and
 * writing continues there.  A zero terminator is always kept after the
 * last written entry so the list stays well-formed at every step.
 * Returns 0 or -ENOMEM.
 */
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	/* Step past an entry that has already been written. */
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		/* Chain the new indirection page into the list. */
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		/* Reserve the last slot for the next indirection pointer. */
		image->last_entry = ind_page +
				((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}
560
561static int kimage_set_destination(struct kimage *image,
562 unsigned long destination)
563{
564 int result;
565
566 destination &= PAGE_MASK;
567 result = kimage_add_entry(image, destination | IND_DESTINATION);
568
569 return result;
570}
571
572
573static int kimage_add_page(struct kimage *image, unsigned long page)
574{
575 int result;
576
577 page &= PAGE_MASK;
578 result = kimage_add_entry(image, page | IND_SOURCE);
579
580 return result;
581}
582
583
/* Release the pages parked on the image's dest and unusable lists. */
static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}
593
/* Arch hook run after an image is fully loaded; default is a no-op. */
int __weak machine_kexec_post_load(struct kimage *image)
{
	return 0;
}
598
/*
 * kimage_terminate - finish the image's page list with an IND_DONE
 * entry so the relocation code knows where to stop.
 */
void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}
606
/*
 * Iterate over every entry in an image's page list, transparently
 * following IND_INDIRECTION links into chained indirection pages and
 * stopping at the IND_DONE terminator (or a zero entry).
 */
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
611
612static void kimage_free_entry(kimage_entry_t entry)
613{
614 struct page *page;
615
616 page = boot_pfn_to_page(entry >> PAGE_SHIFT);
617 kimage_free_pages(page);
618}
619
/*
 * kimage_free - tear down an image and release everything it owns.
 *
 * Frees the source pages recorded in the page list, the indirection
 * pages that hold the list itself (each indirection page is freed only
 * after it has been fully walked, hence the one-entry delay via @ind),
 * the cached extra pages, the control pages, any vmcoreinfo safe copy,
 * and finally the kimage structure.  Safe to call with NULL.
 */
void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/*
			 * Save this indirection page until we are done
			 * with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might hit if
	 * error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}
665
/*
 * kimage_dst_used - find the source entry destined for physical @page.
 *
 * Walks the page list tracking the current destination address (each
 * IND_SOURCE entry implicitly advances it by one page) and returns a
 * pointer to the entry whose destination equals @page, or NULL if no
 * source page targets that address.
 */
static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
684
/*
 * kimage_alloc_page - allocate a source page for data headed to
 * @destination.
 *
 * Here we implement safeguards to ensure that a source page is not
 * copied to its destination page before the data on the destination
 * page is no longer useful.
 *
 * To do this we maintain the invariant that a source page is either
 * its own destination page, or it is not a destination page at all.
 *
 * That is slightly stronger than required, but the proof that no
 * problems will occur is trivial, and the implementation is simple to
 * verify.
 *
 * When allocating all pages normally this algorithm will run in O(N)
 * time, but in the worst case it will run in O(N^2) time.  If the
 * runtime is a problem the data structures can be fixed.
 */
static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I have a
	 * match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up. */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away. */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it. */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it. */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.  See
		 * if there is already a source page for this destination
		 * page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it. */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/*
			 * The old page I have found cannot be a destination
			 * page, so return it if its gfp_flags honor the ones
			 * passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}
781
/*
 * kimage_load_normal_segment - copy one segment into freshly allocated
 * source pages and record them in the image's page list.
 *
 * Source data comes from userspace (segment->buf) or, for file-mode
 * kexec, from a kernel buffer (segment->kbuf).  Each destination page
 * is cleared first so any memsz tail beyond bufsz ends up zeroed.
 * Returns 0 on success or a negative errno.
 */
static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}
850
/*
 * kimage_load_crash_segment - copy one segment directly into the
 * reserved crash kernel region.
 *
 * For the crash case the segment's destination memory (inside
 * crashk_res) is known to be unused, so data is copied straight to its
 * final location and no page list entries are needed.  Bytes of memsz
 * beyond bufsz are zero-filled.  The arch post-alloc/pre-free hooks
 * bracket each page access (e.g. to handle memory encryption).
 * Returns 0 on success or a negative errno.
 */
static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}
918
919int kimage_load_segment(struct kimage *image,
920 struct kexec_segment *segment)
921{
922 int result = -ENOMEM;
923
924 switch (image->type) {
925 case KEXEC_TYPE_DEFAULT:
926 result = kimage_load_normal_segment(image, segment);
927 break;
928 case KEXEC_TYPE_CRASH:
929 result = kimage_load_crash_segment(image, segment);
930 break;
931 }
932
933 return result;
934}
935
/* Image loaded for a normal kexec reboot, if any. */
struct kimage *kexec_image;
/* Image loaded for crash/kdump capture, if any. */
struct kimage *kexec_crash_image;
/* Non-zero once kexec loading has been administratively disabled. */
int kexec_load_disabled;
939
940
941
942
943
944
/*
 * __crash_kexec - jump into the loaded crash kernel, if any.
 *
 * No panic_cpu check version of crash_kexec().  This function is called
 * by functions which already hold the panic_cpu lock; only one CPU may
 * execute this at a time.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/*
	 * Take the kexec_mutex here to prevent sys_kexec_load running
	 * concurrently with crash_kexec().  In the unlikely case that we
	 * cannot take it, bail out rather than deadlock: the crash kernel
	 * is more important than a clean shutdown here.
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);
968
/*
 * crash_kexec - entry point for crashing into the capture kernel.
 *
 * Uses the panic_cpu atomic to ensure only one CPU runs the crash
 * machinery; racing CPUs simply return and are dealt with by the
 * winner's machine_crash_shutdown().
 */
void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as
	 * with panic().  Otherwise parallel calls of panic() and
	 * crash_kexec() would deadlock on kexec_mutex.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.  We only get here if __crash_kexec() returned,
		 * i.e. no crash kernel was loaded or the jump failed.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}
991
992size_t crash_get_memory_size(void)
993{
994 size_t size = 0;
995
996 mutex_lock(&kexec_mutex);
997 if (crashk_res.end != crashk_res.start)
998 size = resource_size(&crashk_res);
999 mutex_unlock(&kexec_mutex);
1000 return size;
1001}
1002
1003void __weak crash_free_reserved_phys_range(unsigned long begin,
1004 unsigned long end)
1005{
1006 unsigned long addr;
1007
1008 for (addr = begin; addr < end; addr += PAGE_SIZE)
1009 free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
1010}
1011
/*
 * crash_shrink_memory - shrink the crash kernel reservation to
 * @new_size bytes, giving the freed tail back to the system as
 * "System RAM".
 *
 * Fails with -ENOENT if a crash image is currently loaded (the loaded
 * segments live in the reservation), and with -EINVAL if @new_size
 * would grow the reservation.  Growing is not supported; only equal or
 * smaller sizes are accepted.  Returns 0 on success.
 */
int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	/* Can't shrink while a crash image occupies the reservation. */
	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Keep the reservation boundary aligned for the architecture. */
	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	/* Hand the tail pages back to the page allocator. */
	crash_free_reserved_phys_range(end, crashk_res.end);

	/* Shrunk to nothing: drop the resource entirely. */
	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	/* Publish the freed tail as ordinary System RAM. */
	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}
1060
/*
 * crash_save_cpu - save @cpu's register state as an ELF NT_PRSTATUS
 * note in that cpu's crash_notes buffer, for the capture kernel /
 * crash tools to read.
 */
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/*
	 * Using ELF notes here is opportunistic.  I need a well-defined
	 * way of saving the register state of the processor so that
	 * user-space tools can find it.  The ELF note format is the
	 * simplest existing convention, and the crash utilities already
	 * know how to parse it.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.common.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}
1086
/*
 * crash_notes_memory_init - allocate the per-cpu buffers used by
 * crash_save_cpu().  Runs once at boot (subsys_initcall).
 */
static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across two vmalloc pages when
	 * percpu is based on vmalloc.  Since a note buffer must be read
	 * as contiguous physical memory by the capture kernel, it must
	 * not straddle a page boundary.  Round the alignment up to a
	 * power of two (capped at PAGE_SIZE) so each buffer fits in one
	 * page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break compile if size is bigger than PAGE_SIZE since
	 * crash_notes would be copied page by page and the single-page
	 * invariant above would no longer hold.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);
1119
1120
1121
1122
1123
1124
/*
 * kernel_kexec - move into place and start executing the preloaded
 * standalone executable (the kexec image).
 *
 * If the image was loaded with preserve_context (CONFIG_KEXEC_JUMP),
 * the system is quiesced like a suspend so it can be resumed when the
 * new kernel jumps back; otherwise a normal reboot-style shutdown is
 * performed before machine_kexec().  Returns a negative errno only on
 * failure (on success machine_kexec() does not return, except in the
 * preserve_context case).
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/*
		 * At this point, dpm_suspend_start() has been called, but
		 * *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for some
		 * devices (e.g. interrupt controllers) become desynchronized
		 * with the actual state of the hardware at resume time, and
		 * evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare("kexec reboot");
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug to prevent
		 * the current CPU from going offline.  It then switches to
		 * the reboot CPU; since hotplug is disabled, no other CPU
		 * can come back online, so it is safe to re-enable hotplug
		 * here.
		 */
		cpu_hotplug_enable();
		pr_notice("Starting new kernel\n");
		machine_shutdown();
	}

	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	/* Unwind the suspend sequence after the new kernel jumped back. */
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}
1208
1209
1210
1211
1212
1213
1214
1215
/*
 * Protect/unprotect the reserved crash kernel region around load and
 * crash time (architectures may e.g. change its page permissions).
 * Default implementations are no-ops.
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}
1221