#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/initrd.h>
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>
#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
#include <linux/tboot.h>
#include <linux/usb/xhci-dbgp.h>
#include <linux/static_call.h>
#include <linux/swiotlb.h>

#include <uapi/linux/mount.h>

#include <xen/xen.h>

#include <asm/apic.h>
#include <asm/numa.h>
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/efi.h>
#include <asm/gart.h>
#include <asm/hypervisor.h>
#include <asm/io_apic.h>
#include <asm/kasan.h>
#include <asm/kaslr.h>
#include <asm/mce.h>
#include <asm/memtype.h>
#include <asm/mtrr.h>
#include <asm/realmode.h>
#include <asm/olpc_ofw.h>
#include <asm/pci-direct.h>
#include <asm/prom.h>
#include <asm/proto.h>
#include <asm/thermal.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>
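/*
 * max_low_pfn_mapped: highest directly mapped pfn below 4 GB
 * max_pfn_mapped:     highest directly mapped pfn above 4 GB
 */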
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif
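/*
 * Boundaries of the early brk area.  It starts at __brk_base, grows via
 * extend_brk(), and is handed over to memblock by reserve_brk().
 */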
unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

struct boot_params boot_params;

static struct resource rodata_resource = {
	.name = "Kernel rodata",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

#ifdef CONFIG_X86_32
struct cpuinfo_x86 new_cpu_data;

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned int def_to_bigsmp;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
#endif

#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init;
#else
__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
#endif

int bootloader_type, bootloader_version;

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
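/**
 * copy_edd() - Copy the BIOS EDD information from boot_params into a
 *              safe place.
 */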
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif

void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}

#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	_brk_start = 0;
}

u64 relocated_ramdisk;

#ifdef CONFIG_BLK_DEV_INITRD

static u64 __init get_ramdisk_image(void)
{
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;

	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

	if (ramdisk_image == 0)
		ramdisk_image = phys_initrd_start;

	return ramdisk_image;
}

static u64 __init get_ramdisk_size(void)
{
	u64 ramdisk_size = boot_params.hdr.ramdisk_size;

	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

	if (ramdisk_size == 0)
		ramdisk_size = phys_initrd_size;

	return ramdisk_size;
}

static void __init relocate_initrd(void)
{
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 area_size = PAGE_ALIGN(ramdisk_size);

	relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
						      PFN_PHYS(max_pfn_mapped));
	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
	       " [mem %#010llx-%#010llx]\n",
	       ramdisk_image, ramdisk_image + ramdisk_size - 1,
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}

static void __init early_reserve_initrd(void)
{
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}

static void __init reserve_initrd(void)
{
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;

	initrd_start = 0;

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
	       ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_phys_free(ramdisk_image, ramdisk_end - ramdisk_image);
}

#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif

static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, data_type;

		data = early_memremap(pa_data, sizeof(*data));
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_memunmap(data, sizeof(*data));

		switch (data_type) {
		case SETUP_E820_EXT:
			e820__memory_setup_extended(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}

static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 pa_data, pa_next;
	u32 len;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		if (!data) {
			pr_warn("setup: failed to memremap setup_data entry\n");
			return;
		}

		len = sizeof(*data);
		pa_next = data->next;

		memblock_reserve(pa_data, sizeof(*data) + data->len);

		if (data->type == SETUP_INDIRECT) {
			len += data->len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap(pa_data, len);
			if (!data) {
				pr_warn("setup: failed to memremap indirect setup_data\n");
				return;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT)
				memblock_reserve(indirect->addr, indirect->len);
		}

		pa_data = pa_next;
		early_memunmap(data, len);
	}
}

#define CRASH_ALIGN		SZ_16M
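/*
 * Keep the crash kernel below these limits.
 *
 * On 32-bit the crash kernel must stay in the low 512 MB because of
 * mapping restrictions.  On 64-bit, kdump kernels are kept below 64 TB,
 * the upper limit of system RAM in 4-level paging mode.
 */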
#ifdef CONFIG_X86_32
# define CRASH_ADDR_LOW_MAX	SZ_512M
# define CRASH_ADDR_HIGH_MAX	SZ_512M
#else
# define CRASH_ADDR_LOW_MAX	SZ_4G
# define CRASH_ADDR_HIGH_MAX	SZ_64T
#endif

static int __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
	unsigned long long base, low_base = 0, low_size = 0;
	unsigned long low_mem_limit;
	int ret;

	low_mem_limit = min(memblock_phys_mem_size(), CRASH_ADDR_LOW_MAX);

	ret = parse_crashkernel_low(boot_command_line, low_mem_limit, &low_size, &base);
	if (ret) {
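		/*
		 * crashkernel=Y,low was not specified or could not be parsed:
		 * fall back to a default that covers the swiotlb bounce
		 * buffer plus 8 MB of slack, but no less than 256 MB, so
		 * 32-bit DMA-capable devices still work in the crash kernel.
		 */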
		low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
	} else {
		if (!low_size)
			return 0;
	}

	low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
	if (!low_base) {
		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
		       (unsigned long)(low_size >> 20));
		return -ENOMEM;
	}

	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (low RAM limit: %ldMB)\n",
		(unsigned long)(low_size >> 20),
		(unsigned long)(low_base >> 20),
		(unsigned long)(low_mem_limit >> 20));

	crashk_low_res.start = low_base;
	crashk_low_res.end = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
	return 0;
}

static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base, total_mem;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	total_mem = memblock_phys_mem_size();

	ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0) {
		ret = parse_crashkernel_high(boot_command_line, total_mem,
					     &crash_size, &crash_base);
		if (ret != 0 || crash_size <= 0)
			return;
		high = true;
	}

	if (xen_pv_domain()) {
		pr_info("Ignoring crashkernel for a Xen PV domain\n");
		return;
	}

	if (!crash_base) {
		if (!high)
			crash_base = memblock_phys_alloc_range(crash_size,
						CRASH_ALIGN, CRASH_ALIGN,
						CRASH_ADDR_LOW_MAX);
		if (!crash_base)
			crash_base = memblock_phys_alloc_range(crash_size,
						CRASH_ALIGN, CRASH_ALIGN,
						CRASH_ADDR_HIGH_MAX);
		if (!crash_base) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, SZ_1M, crash_base,
						  crash_base + crash_size);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
		memblock_phys_free(crash_base, crash_size);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}

static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;
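	/* request I/O space for devices used on all i[345]86 PCs */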
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
}

static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}

static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
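	/*
	 * SandyBridge integrated graphics devices have an erratum that
	 * prevents them from accessing certain memory ranges, namely the
	 * pages listed in bad_pages[] above.  Reserve those pages here so
	 * that nothing else is ever placed in them.
	 */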
	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}

static void __init trim_bios_range(void)
{
	e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);

	e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);

	e820__update_table(e820_table);
}

static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;
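	/*
	 * The kernel range is expected to already be E820_TYPE_RAM.  If it
	 * is not (for example because of a confused BIOS or a memmap=
	 * command line option that punched a hole through it), warn and put
	 * the range back so the rest of boot does not trip over it.
	 */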
	if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
	e820__range_remove(start, size, E820_TYPE_RAM, 0);
	e820__range_add(start, size, E820_TYPE_RAM);
}

static void __init early_reserve_memory(void)
{
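	/*
	 * Reserve the memory occupied by the kernel between _text and
	 * __end_of_kernel_reserve.  Any kernel sections after the
	 * __end_of_kernel_reserve symbol must be explicitly reserved with a
	 * separate memblock_reserve() or they will be discarded.
	 */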
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
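	/*
	 * Reserve the first 64K of RAM unconditionally: page 0 must never be
	 * reused (its contents can leak to user space on L1TF-affected CPUs)
	 * and a number of BIOSes are known to corrupt low memory across
	 * suspend/resume and warm reboot.
	 */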
	memblock_reserve(0, SZ_64K);

	early_reserve_initrd();

	memblock_x86_reserve_range_setup_data();

	reserve_ibft_region();
	reserve_bios_regions();
	trim_snb_memory();
}

static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
	if (kaslr_enabled()) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
			 kaslr_offset(),
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}

	return 0;
}
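/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */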
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));

	clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
	boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
#endif

	olpc_ofw_detect();

	idt_setup_early_traps();
	early_cpu_init();
	jump_label_init();
	static_call_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI32_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI64_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}
#endif

	x86_init.oem.arch_setup();
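	/*
	 * Do some memory reservations *before* memory is added to memblock,
	 * so memblock allocations won't overwrite it.
	 *
	 * After this point, everything still needed from the boot loader or
	 * firmware or kernel text should be early reserved or marked not
	 * RAM in e820.  All other memory is free game.
	 *
	 * This call needs to happen before e820__memory_setup(), which on a
	 * Xen dom0 ends up in xen_memory_setup() and relies on these early
	 * reservations having happened already.
	 */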
	early_reserve_memory();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	e820__memory_setup();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	setup_initial_init_mm(_text, _etext, _edata, (void *)_brk_end);

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	rodata_resource.start = __pa_symbol(__start_rodata);
	rodata_resource.end = __pa_symbol(__end_rodata)-1;
	data_resource.start = __pa_symbol(_sdata);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	x86_configure_nx();

	parse_early_param();

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();

#ifdef CONFIG_MEMORY_HOTPLUG
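	/*
	 * Memory used by the kernel cannot be hot-removed because Linux
	 * cannot migrate the kernel pages.  With "movable_node" on the
	 * command line, have memblock allocate bottom-up so that early
	 * allocations land near the kernel image and hot-pluggable nodes
	 * stay free of permanent allocations until SRAT is parsed.
	 */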
	if (movable_node_is_enabled())
		memblock_set_bottom_up(true);
#endif

	x86_report_nx();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

	e820__reserve_setup_data();
	e820__finish_early_params();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	dmi_setup();

	init_hypervisor_platform();

	tsc_early_init();
	x86_init.resources.probe_roms();

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &rodata_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
				   E820_TYPE_RESERVED);
		e820__update_table(e820_table);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820__print_table("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	max_pfn = e820__end_of_ram_pfn();

	if (IS_ENABLED(CONFIG_MTRR))
		mtrr_bp_init();
	else
		pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");

	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820__end_of_ram_pfn();

	max_possible_pfn = max_pfn;

	init_cache_modes();

	kernel_randomize_memory();

#ifdef CONFIG_X86_32
	find_low_pfn_range();
#else
	check_x2apic();

	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820__end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	find_smp_config();

	early_alloc_pgt_buf();
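	/*
	 * The brk area has to be concluded before e820__memblock_setup():
	 * once memblock starts handing out ranges they could otherwise
	 * overlap with the still-growing brk.
	 */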
	reserve_brk();

	cleanup_highmap();

	memblock_set_current_limit(ISA_END_ADDRESS);
	e820__memblock_setup();

	sev_setup_arch();

	efi_fake_memmap();
	efi_find_mirror();
	efi_esrt_init();
	efi_mokvar_table_init();

	efi_reserve_boot_services();

	e820__memblock_alloc_reserved_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
	       (max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif
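	/*
	 * Reserve memory below 1M for the real-mode trampoline, which is
	 * needed for bringing up secondary CPUs and for ACPI S3 wakeup.
	 */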
	reserve_real_mode();

	init_mem_mapping();

	idt_setup_early_pf();
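	/*
	 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
	 * with the current CR4 value.  Mask off the PCIDE bit: CR4.PCIDE is
	 * enabled later, per CPU, in cpu_init().
	 */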
	mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;

	memblock_set_current_limit(get_max_mapped());

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif

	setup_log_buf(1);

	if (efi_enabled(EFI_BOOT)) {
		switch (boot_params.secure_boot) {
		case efi_secureboot_mode_disabled:
			pr_info("Secure boot disabled\n");
			break;
		case efi_secureboot_mode_enabled:
			pr_info("Secure boot enabled\n");
			break;
		default:
			pr_info("Secure boot could not be determined\n");
			break;
		}
	}

	reserve_initrd();

	acpi_table_upgrade();
	acpi_boot_table_init();

	vsmp_init();

	io_delay_init();

	early_platform_quirks();

	early_acpi_boot_init();

	initmem_init();
	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);

	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
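	/*
	 * Reserve memory for crash kernel after SRAT is parsed so that it
	 * won't consume hotpluggable memory.
	 */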
	reserve_crashkernel();

	memblock_find_dma_reserve();

	if (!early_xdbc_setup_hardware())
		early_xdbc_register_console();

	x86_init.paging.pagetable_init();

	kasan_init();

	sync_initial_page_table();

	tboot_probe();

	map_vsyscall();

	generic_apic_probe();

	early_quirks();

	acpi_boot_init();
	x86_dtb_init();

	get_smp_config();

	init_apic_mappings();

	prefill_possible_map();

	init_cpu_to_node();
	init_gi_nodes();

	io_apic_init_mappings();

	x86_init.hyper.guest_late_init();

	e820__reserve_resources();
	e820__register_nosave_regions(max_pfn);

	x86_init.resources.reserve_resources();

	e820__setup_pci_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	therm_lvt_init();

	mcheck_init();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif

	unwind_init();
}

#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name = "Video RAM area",
	.start = 0xa0000,
	.end = 0xbffff,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);