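/*
 * Architecture-specific x86 boot-time setup: setup_arch() and its
 * helpers for memory-map construction, initrd and crashkernel
 * reservation, and early platform initialization.
 */
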
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
#include <linux/dma-contiguous.h>
#include <linux/security.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>

#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>

#include <video/edid.h>

#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/realmode.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/bugs.h>

#include <asm/vsyscall.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>

#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/olpc_ofw.h>

#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/amd_nb.h>
#include <asm/mce.h>
#include <asm/alternative.h>
#include <asm/prom.h>
#include <asm/microcode.h>
#include <asm/kaslr.h>
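
/*
 * max_low_pfn_mapped: highest directly mapped pfn under 4 GB
 * max_pfn_mapped:     highest directly mapped pfn over 4 GB
 *
 * The direct mapping only covers E820_RAM regions, so the ranges and
 * gaps are represented by pfn_mapped.
 */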
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif

static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

#ifdef CONFIG_X86_64
int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#endif

struct boot_params boot_params;
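
/*
 * Machine setup..
 */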
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};


#ifdef CONFIG_X86_32
/* CPU data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data = {
	.wp_works_ok = -1,
};
/* Common CPU data for all CPUs */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.wp_works_ok = -1,
};
EXPORT_SYMBOL(boot_cpu_data);

struct rh_cpuinfo_x86 rh_boot_cpu_data __read_mostly = {
	.x86_cache_max_rmid = -1,
	.x86_cache_occ_scale = -1,
};
EXPORT_SYMBOL(rh_boot_cpu_data);

unsigned int def_to_bigsmp;

/* Identity of the BIOS/machine, from the boot-time sys_desc_table */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);

struct rh_cpuinfo_x86 rh_boot_cpu_data __read_mostly = {
	.x86_cache_max_rmid = -1,
	.x86_cache_occ_scale = -1,
};
EXPORT_SYMBOL(rh_boot_cpu_data);
#endif


#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
unsigned long mmu_cr4_features;
#else
unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif

/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
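
/**
 * copy_edd() - Copy the BIOS EDD information from boot_params into a
 *              safe place.
 */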
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif
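
/*
 * extend_brk() - allocate and zero @size bytes from the early brk area.
 * @align must be a power of two.  Only valid until reserve_brk() hands
 * the area over to memblock and closes it for new allocations.
 */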
void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}

#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/*
	 * Mark the brk area as locked down and no longer taking any
	 * new allocations.
	 */
	_brk_start = 0;
}

u64 relocated_ramdisk;

#ifdef CONFIG_BLK_DEV_INITRD
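
/*
 * The 64-bit ramdisk address and size are split between the legacy
 * 32-bit header fields and the ext_ramdisk_* fields of struct
 * boot_params; the two helpers below reassemble them.
 */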
static u64 __init get_ramdisk_image(void)
{
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;

	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

	return ramdisk_image;
}

static u64 __init get_ramdisk_size(void)
{
	u64 ramdisk_size = boot_params.hdr.ramdisk_size;

	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

	return ramdisk_size;
}

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 area_size = PAGE_ALIGN(ramdisk_size);
	unsigned long slop, clen, mapaddr;
	char *p, *q;

	/* We need to move the initrd down into directly mapped mem */
	relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
						   area_size, PAGE_SIZE);

	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/*
	 * Note: this includes all the mem currently occupied by
	 * the initrd, we rely on that fact to keep the data intact.
	 */
	memblock_reserve(relocated_ramdisk, area_size);
	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	q = (char *)initrd_start;

	/* Copy the initrd, chunk by chunk, through the fixmap window */
	while (ramdisk_size) {
		slop = ramdisk_image & ~PAGE_MASK;
		clen = ramdisk_size;
		if (clen > MAX_MAP_CHUNK-slop)
			clen = MAX_MAP_CHUNK-slop;
		mapaddr = ramdisk_image & PAGE_MASK;
		p = early_memremap(mapaddr, clen+slop);
		memcpy(q, p+slop, clen);
		early_iounmap(p, clen+slop);
		q += clen;
		ramdisk_image += clen;
		ramdisk_size -= clen;
	}

	ramdisk_image = get_ramdisk_image();
	ramdisk_size = get_ramdisk_size();
	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}

static void __init early_reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}

static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 mapped_size;

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	mapped_size = memblock_mem_size(max_pfn_mapped);
	if (ramdisk_size >= (mapped_size>>1))
		panic("initrd too large to handle, "
		      "disabling initrd (%lld needed, %lld available)\n",
		      ramdisk_size, mapped_size>>1);

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
	       ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif
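
/*
 * parse_setup_data() - walk the singly linked list of setup_data blobs
 * handed over by the boot loader and dispatch each entry by type.
 */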
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, map_len, data_type;

		map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
			      (u64)sizeof(struct setup_data));
		data = early_memremap(pa_data, map_len);
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_iounmap(data, map_len);

		switch (data_type) {
		case SETUP_E820_EXT:
			parse_e820_ext(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}

static void __init e820_reserve_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;
	int found = 0;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		e820_update_range(pa_data, sizeof(*data)+data->len,
				  E820_RAM, E820_RESERVED_KERN);
		found = 1;
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
	if (!found)
		return;

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("reserve setup_data");
}

static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		memblock_reserve(pa_data, sizeof(*data) + data->len);
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
}
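
/*
 * --------- Crashkernel reservation ------------------------------
 */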

#ifdef CONFIG_KEXEC_CORE

/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN		(16 << 20)

/*
 * Keep the crash kernel below this limit.  On 32 bits earlier kernels
 * would limit the kernel to the low 512 MiB due to mapping restrictions.
 */
#ifdef CONFIG_X86_32
# define CRASH_ADDR_LOW_MAX	(512 << 20)
# define CRASH_ADDR_HIGH_MAX	(512 << 20)
#else
# define CRASH_ADDR_LOW_MAX	(896UL << 20)
# define CRASH_ADDR_HIGH_MAX	MAXMEM
#endif

static int __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
	unsigned long long base, low_base = 0, low_size = 0;
	unsigned long total_low_mem;
	int ret;

	total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));

	/* crashkernel=Y,low */
	ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
	if (ret) {
		/*
		 * Two parts from lib/swiotlb.c:
		 * - swiotlb size: user-specified with swiotlb= or default
		 *
		 * - swiotlb overflow buffer: now hardcoded to 32k. We round it
		 *   to 8M for other buffers that may need to stay low too. Also
		 *   make sure we allocate enough extra low memory so that we
		 *   don't run out of DMA buffers for 32-bit devices.
		 */
		low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
	} else {
		/* passed with crashkernel=0,low ? */
		if (!low_size)
			return 0;
	}

	low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
	if (!low_base) {
		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
		       (unsigned long)(low_size >> 20));
		return -ENOMEM;
	}

	ret = memblock_reserve(low_base, low_size);
	if (ret) {
		pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
		return ret;
	}

	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
		(unsigned long)(low_size >> 20),
		(unsigned long)(low_base >> 20),
		(unsigned long)(total_low_mem >> 20));

	crashk_low_res.start = low_base;
	crashk_low_res.end   = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
	return 0;
}

static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base, total_mem;
	bool high = false;
	int ret;

	total_mem = memblock_phys_mem_size();

	/* crashkernel=XM */
	ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0) {
		/* crashkernel=X,high */
		ret = parse_crashkernel_high(boot_command_line, total_mem,
					     &crash_size, &crash_base);
		if (ret != 0 || crash_size <= 0)
			return;
		high = true;
	}

	/* 0 means: find the address automatically */
	if (crash_base <= 0) {
		/*
		 * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
		 * as old kexec-tools loads bzImage below that, unless
		 * "crashkernel=size[KMG],high" is specified.
		 */
		crash_base = memblock_find_in_range(CRASH_ALIGN,
						    high ? CRASH_ADDR_HIGH_MAX
							 : CRASH_ADDR_LOW_MAX,
						    crash_size, CRASH_ALIGN);
#ifdef CONFIG_X86_64
		/*
		 * crashkernel=X reserve below 896M fails? Try below 4G
		 */
		if (!high && !crash_base)
			crash_base = memblock_find_in_range(CRASH_ALIGN,
						(1ULL << 32),
						crash_size, CRASH_ALIGN);
		/*
		 * crashkernel=X reserve below 4G fails? Try MAXMEM
		 */
		if (!high && !crash_base)
			crash_base = memblock_find_in_range(CRASH_ALIGN,
						CRASH_ADDR_HIGH_MAX,
						crash_size, CRASH_ALIGN);
#endif
		if (!crash_base) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}

	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, 1 << 20);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}
	ret = memblock_reserve(crash_base, crash_size);
	if (ret) {
		pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
		return;
	}

	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
		memblock_free(crash_base, crash_size);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif

static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
}

static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);

	if (size)
		memblock_reserve(addr, size);
}

static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}
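
/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */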
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * Reserve all memory below the 1 MB mark that has not
	 * already been reserved.
	 */
	memblock_reserve(0, 1<<20);

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}
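
/*
 * Here we put platform-specific memory range workarounds, i.e.
 * memory known to be corrupt or otherwise in need to be reserved on
 * specific platforms.
 *
 * If this gets used more widely it could use a real dispatch mechanism.
 */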
static void __init trim_platform_memory_ranges(void)
{
	trim_snb_memory();
}

static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 *
	 * This typically reserves additional memory (64KiB by default)
	 * since some BIOSes are known to corrupt low memory.  See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);

	/*
	 * special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 * take them out.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
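
/* called before trim_bios_range() to spare extra sanitize */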
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/*
	 * Complain if .text .data and .bss are not marked as E820_RAM and
	 * attempt to fix it by adding the range. We may have a confused BIOS,
	 * or the user may have used memmap=exactmap or memmap=xxM$yyM to
	 * exclude kernel range. If we really are running on top non-RAM,
	 * we will crash later anyways.
	 */
	if (e820_all_mapped(start, start + size, E820_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_RAM!\n");
	e820_remove_range(start, size, E820_RAM, 0);
	e820_add_region(start, size, E820_RAM);
}

static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (!p)
		return -EINVAL;

	size = memparse(p, &p);

	if (size < 4096)
		size = 4096;

	if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);

static void __init trim_low_memory_range(void)
{
	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}
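
/*
 * Check for CPU/platform configurations that this Red Hat Enterprise
 * Linux kernel build does not support and report them loudly.
 */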
static void rh_check_supported(void)
{
	/* RHEL7 supports single cpu on guests only */
	if (((boot_cpu_data.x86_max_cores * smp_num_siblings) == 1) &&
	    !x86_hyper && !cpu_has_hypervisor && !is_kdump_kernel()) {
		pr_crit("Detected single cpu native boot.\n");
		pr_crit("Important: In Red Hat Enterprise Linux 7, single threaded, single CPU 64-bit physical systems are unsupported by Red Hat. Please contact your Red Hat support representative for a list of certified and supported systems.\n");
	}

	/*
	 * The RHEL kernel does not support this hardware.  The kernel may
	 * boot, but the installation is not supported.  Print a message to
	 * the console and mark the hardware as unsupported.
	 */
	if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) &&
	    (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)) {
		pr_crit("Detected processor %s %s\n",
			boot_cpu_data.x86_vendor_id,
			boot_cpu_data.x86_model_id);
		mark_hardware_unsupported("Processor");
	}

	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	    (boot_cpu_data.x86 >= 0x17)) {
		/* Of family 17h and later, only EPYC 7xxx parts are supported */
		if (boot_cpu_data.x86 != 0x17 ||
		    !strstr(boot_cpu_data.x86_model_id, "AMD EPYC 7")) {
			pr_crit("Detected CPU family %xh model %d\n",
				boot_cpu_data.x86,
				boot_cpu_data.x86_model);
			mark_hardware_unsupported("AMD Processor");
		}
	}

	/* Intel CPU family 6, model > 63 is unsupported, with exceptions */
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    (boot_cpu_data.x86 == 6)) {
		switch (boot_cpu_data.x86_model) {
		case 158: /* Kaby Lake-H/S */
		case 142: /* Kaby Lake-U/Y */
		case 133: /* Knights Mill */
		case 95:  /* Denverton */
		case 94:  /* Skylake-S */
		case 87:  /* Knights Landing */
		case 86:  /* Broadwell-DE SoC */
		case 85:  /* Skylake-EP */
		case 79:  /* Broadwell-EP and Broadwell-EX */
		case 78:  /* Skylake-Y */
		case 77:  /* Atom Avoton */
		case 71:  /* Broadwell-H */
		case 70:  /* Crystal Well */
		case 69:  /* Haswell ULT */
			break;
		default:
			if (boot_cpu_data.x86_model > 63) {
				pr_crit("Detected CPU family %d model %d\n",
					boot_cpu_data.x86,
					boot_cpu_data.x86_model);
				mark_hardware_unsupported("Intel Processor");
			}
			break;
		}
	}

	/*
	 * Due to the complexity of x86 lapic & ioapic enumeration, and PCI IRQ
	 * routing, ACPI is required for x86.  acpi=off is a valid debug kernel
	 * parameter, so just print a loud message in case of invalid use.
	 *
	 * Exception: Virtual guests don't require ACPI.
	 */
	if (acpi_disabled && !x86_hyper && !cpu_has_hypervisor)
		pr_crit("ACPI has been disabled or is not available on this hardware. This may result in a single cpu boot, incorrect PCI IRQ routing, or boot failure.\n");
}
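
/*
 * Dump out kernel offset information on panic.
 */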
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
	if (kaslr_enabled()) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
			 kaslr_offset(),
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}

	return 0;
}
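
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */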
void __init setup_arch(char **cmdline_p)
{
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__bss_stop - (unsigned long)_text);

	early_reserve_initrd();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */

#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
	visws_early_detect();

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	early_trap_init();
	early_cpu_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
	if (boot_params.sys_desc_table.length != 0) {
		machine_id = boot_params.sys_desc_table.table[0];
		machine_submodel_id = boot_params.sys_desc_table.table[1];
		BIOS_revision = boot_params.sys_desc_table.table[2];
	}
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL32", 4)) {
		set_bit(EFI_BOOT, &x86_efi_facility);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4)) {
		set_bit(EFI_BOOT, &x86_efi_facility);
		set_bit(EFI_64BIT, &x86_efi_facility);
	}

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();
#endif

	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	setup_memory_map();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	mpx_mm_init(&init_mm);

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	data_resource.start = __pa_symbol(_etext);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	x86_report_nx();

	/* after early param, so could get panic from serial */
	memblock_x86_reserve_range_setup_data();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

#ifdef CONFIG_PCI
	if (pci_early_dump_regs)
		early_dump_pci_devices();
#endif

	/* update the e820_saved too */
	e820_reserve_setup_data();
	finish_e820_parsing();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	dmi_scan_machine();
	dmi_memdev_walk();
	dmi_set_dump_stack_arch_desc();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine, for the BP.
	 */
	init_hypervisor_platform();

	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820_end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();

	max_possible_pfn = max_pfn;

	/*
	 * Define random base addresses for memory sections after max_pfn is
	 * defined and before each memory section base is used.
	 */
	kernel_randomize_memory();

#ifdef CONFIG_X86_32
	/* max_low_pfn get updated here */
	find_low_pfn_range();
#else
	num_physpages = max_pfn;

	check_x2apic();

	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	reserve_ibft_region();

	early_alloc_pgt_buf();

	/*
	 * Need to conclude brk, before memblock_x86_fill():
	 * it could use memblock_find_in_range, which could
	 * overlap with the brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock_set_current_limit(ISA_END_ADDRESS);
	memblock_x86_fill();

	if (efi_enabled(EFI_BOOT))
		efi_find_mirror();

	/*
	 * The EFI specification says that boot service code won't be called
	 * after ExitBootServices(). This is, in fact, a lie.
	 */
	if (efi_enabled(EFI_MEMMAP))
		efi_reserve_boot_services();

	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
			(max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	reserve_real_mode();

	trim_platform_memory_ranges();
	trim_low_memory_range();

	init_mem_mapping();

	early_trap_pf_init();

	setup_real_mode();

	memblock_set_current_limit(get_max_mapped());
	dma_contiguous_reserve(0);

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

#ifdef CONFIG_EFI_SECURE_BOOT_SECURELEVEL
	if (boot_params.secure_boot) {
		set_bit(EFI_SECURE_BOOT, &x86_efi_facility);
		set_securelevel(1);
		pr_info("Secure boot enabled\n");
	}
#endif

	reserve_initrd();

#if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
	acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
#endif

	vsmp_init();

	io_delay_init();

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();

	initmem_init();

	/*
	 * Reserve memory for crash kernel after SRAT is parsed so that it
	 * won't consume hotpluggable memory.
	 */
	reserve_crashkernel();

	memblock_find_dma_reserve();

#ifdef CONFIG_KVM_GUEST
	kvmclock_init();
#endif

	x86_init.paging.pagetable_init();

	if (boot_cpu_data.cpuid_level >= 0) {
		/* A CPU has %cr4 if and only if it has CPUID */
		mmu_cr4_features = read_cr4();
		if (trampoline_cr4_features)
			*trampoline_cr4_features = mmu_cr4_features;
	}

#ifdef CONFIG_X86_32
	/* sync back kernel address range */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
#endif

	tboot_probe();

#ifdef CONFIG_X86_64
	map_vsyscall();
#endif

	generic_apic_probe();

	early_quirks();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();

	/*
	 * Systems w/o ACPI and mptables might not have the local APIC
	 * mapped yet, but prefill_possible_map() might need to access it.
	 */
	init_apic_mappings();

	prefill_possible_map();

	init_cpu_to_node();

	if (x86_io_apic_ops.init)
		x86_io_apic_ops.init();

	kvm_guest_init();

	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	x86_init.resources.reserve_resources();

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	arch_init_ideal_nops();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif

	rh_check_supported();
}

#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
					&kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);