/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/frame.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
#include "kexec_internal.h"

DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in do_exit() path, each of which
	 * corresponds to each of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means that only memory whose physical
 * address fits in an unsigned long can be supported.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control code buffer, whose size is given
 * by KEXEC_CONTROL_PAGE_SIZE.  In the best case a single page is
 * enough, but some architectures require more.  Because this memory
 * must be identity mapped during the transition it has to be
 * allocated so that it does not collide with any of the destination
 * segments of the new kernel.
 *
 * The pages holding the new kernel are described by a list of
 * kimage entries (IND_DESTINATION, IND_SOURCE, IND_INDIRECTION,
 * IND_DONE).  The entry pages themselves, like the control pages,
 * must not overlap any destination range; see
 * kimage_is_destination_range() and kimage_alloc_page() below for
 * how source pages are allocated so that, whenever possible, each
 * page already sits at its final destination.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
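/*
 * Worked example (illustrative, assuming 4 KiB pages):
 *   PAGE_COUNT(0x1000) == 1 and PAGE_COUNT(0x1001) == 2,
 * i.e. any partial trailing page is rounded up to a whole page.
 */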

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/*
	 * Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments collide? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/*
	 * Ensure our buffer sizes are strictly less than our memory
	 * sizes.  This should always be the case, and it is easier to
	 * check up front than to be surprised later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed.  If
	 * the request from userspace is too large, a large amount of time
	 * will be wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > totalram_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/*
	 * Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/*
	 * Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/*
		 * Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/*
	 * Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/*
	 * Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory.  Since arch_kexec_protect_crashkres() may
	 * remove the crash region from the kernel direct mapping
	 * after the kexec syscall, the copy is naturally protected
	 * from stray writes.  We still need to update it when a
	 * crash happens to generate the vmcoreinfo note, so map it
	 * through vmap() for that purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I may have allocated */
	kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
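
/*
 * Illustrative sketch (added summary, not from the original source) of the
 * entry list that for_each_kimage_entry() walks:
 *
 *   IND_DESTINATION(dst) IND_SOURCE(src0) IND_SOURCE(src1) ...
 *   IND_INDIRECTION(next entry page) ... IND_DONE
 *
 * An IND_DESTINATION entry sets the running destination address, each
 * IND_SOURCE entry names one source page to copy there (the destination
 * then advances by PAGE_SIZE, see kimage_dst_used()), IND_INDIRECTION
 * chains to the next page of entries, and IND_DONE terminates the list.
 */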

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/*
			 * Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might hit if
	 * an error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simply to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed up to prevent the worst case behavior.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/*
	 * For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I know it will only be used
	 * when there are no other cpus I cannot use xchg.
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other.  To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		printk_safe_flush_on_panic();
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/*
	 * Save this cpu's register state as an ELF NT_PRSTATUS note in
	 * the preallocated per-cpu crash note buffer, so that the dump
	 * capture kernel (and userspace tools reading the vmcore) can
	 * recover the registers of the crashing system.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}
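
/*
 * Layout note (added summary, not from the original source): the per-cpu
 * buffer written above holds one ELF note named KEXEC_CORE_NOTE_NAME of
 * type NT_PRSTATUS, followed by the empty terminating note appended by
 * final_note().  The buffer's physical address is exported to the kdump
 * tooling, which is why the whole note buffer must fit within a single
 * physical page; see crash_notes_memory_init() below.
 */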

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based and vmalloc doesn't guarantee that 2 continuous
	 * virtual pages are also on 2 continuous physical pages.  In that
	 * case the 2nd part of crash_notes in the 2nd page could be lost,
	 * since only the starting address and size of crash_notes are
	 * exported.  So round the size of crash_notes up to the nearest
	 * power of two and pass it to __alloc_percpu() as the align value,
	 * which guarantees crash_notes is allocated inside one physical
	 * page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break compile if size is bigger than PAGE_SIZE since crash_notes
	 * definitely will span 2 pages with that.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case).  However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}

/*
 * Protection mechanism for crashkernel reserved memory after
 * the kdump kernel is loaded.
 *
 * Provide an empty default implementation here -- architecture
 * code may override this.
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}