1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/sched.h>
25#include <linux/mm.h>
26#include <linux/mmzone.h>
27#include <linux/screen_info.h>
28#include <linux/ioport.h>
29#include <linux/acpi.h>
30#include <linux/sfi.h>
31#include <linux/apm_bios.h>
32#include <linux/initrd.h>
33#include <linux/bootmem.h>
34#include <linux/memblock.h>
35#include <linux/seq_file.h>
36#include <linux/console.h>
37#include <linux/root_dev.h>
38#include <linux/highmem.h>
39#include <linux/module.h>
40#include <linux/efi.h>
41#include <linux/init.h>
42#include <linux/edd.h>
43#include <linux/iscsi_ibft.h>
44#include <linux/nodemask.h>
45#include <linux/kexec.h>
46#include <linux/dmi.h>
47#include <linux/pfn.h>
48#include <linux/pci.h>
49#include <asm/pci-direct.h>
50#include <linux/init_ohci1394_dma.h>
51#include <linux/kvm_para.h>
52#include <linux/dma-contiguous.h>
53#include <linux/security.h>
54
55#include <linux/errno.h>
56#include <linux/kernel.h>
57#include <linux/stddef.h>
58#include <linux/unistd.h>
59#include <linux/ptrace.h>
60#include <linux/user.h>
61#include <linux/delay.h>
62
63#include <linux/kallsyms.h>
64#include <linux/cpufreq.h>
65#include <linux/dma-mapping.h>
66#include <linux/ctype.h>
67#include <linux/uaccess.h>
68
69#include <linux/percpu.h>
70#include <linux/crash_dump.h>
71#include <linux/tboot.h>
72#include <linux/jiffies.h>
73#include <linux/mem_encrypt.h>
74#include <linux/cpumask.h>
75
76#include <video/edid.h>
77
78#include <asm/mtrr.h>
79#include <asm/apic.h>
80#include <asm/realmode.h>
81#include <asm/e820.h>
82#include <asm/mpspec.h>
83#include <asm/setup.h>
84#include <asm/efi.h>
85#include <asm/timer.h>
86#include <asm/i8259.h>
87#include <asm/sections.h>
88#include <asm/io_apic.h>
89#include <asm/ist.h>
90#include <asm/setup_arch.h>
91#include <asm/bios_ebda.h>
92#include <asm/cacheflush.h>
93#include <asm/processor.h>
94#include <asm/bugs.h>
95
96#include <asm/vsyscall.h>
97#include <asm/cpu.h>
98#include <asm/desc.h>
99#include <asm/dma.h>
100#include <asm/iommu.h>
101#include <asm/gart.h>
102#include <asm/mmu_context.h>
103#include <asm/proto.h>
104
105#include <asm/paravirt.h>
106#include <asm/hypervisor.h>
107#include <asm/olpc_ofw.h>
108
109#include <asm/percpu.h>
110#include <asm/topology.h>
111#include <asm/apicdef.h>
112#include <asm/amd_nb.h>
113#include <asm/mce.h>
114#include <asm/alternative.h>
115#include <asm/prom.h>
116#include <asm/microcode.h>
117#include <asm/mmu_context.h>
118#include <asm/kaslr.h>
119#include <asm/intel-family.h>
120
121
122
123
124
125
126
127
/*
 * NOTE(review): presumably the highest pfns covered by the early direct
 * mapping (below 4GB / overall) — confirm against init_mem_mapping().
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
/* Early brk space used for DMI table scanning before allocators are up. */
RESERVE_BRK(dmi_alloc, 65536);
#endif


/* Range handed out by extend_brk(); _brk_start is zeroed by reserve_brk(). */
static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;
138
#ifdef CONFIG_X86_64
/* Thin wrappers delegating to the generic default MPS-CPU/APIC-ID helpers. */
int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#endif
150
/* Boot parameter block ("zero page") handed over by the boot loader. */
struct boot_params boot_params;

/*
 * /proc/iomem resources describing the kernel image sections; the
 * start/end fields are filled in from __pa_symbol() in setup_arch().
 */
static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
176
177
#ifdef CONFIG_X86_32

/* CPU data gathered by early boot code; merged into boot_cpu_data in setup_arch(). */
struct cpuinfo_x86 new_cpu_data = {
	.wp_works_ok = -1,
};

/* Common CPU data for all CPUs, exported for modules. */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.wp_works_ok = -1,
};
EXPORT_SYMBOL(boot_cpu_data);

/* RHEL-specific cpuinfo extension (field names suggest cache-monitoring data). */
struct rh_cpuinfo_x86 rh_boot_cpu_data __read_mostly = {
	.x86_cache_max_rmid = -1,
	.x86_cache_occ_scale = -1,
};
EXPORT_SYMBOL(rh_boot_cpu_data);

unsigned int def_to_bigsmp;

/* Machine identification, copied from boot_params.sys_desc_table in setup_arch(). */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
/* 64-bit: physical address width starts at the architectural maximum. */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);

struct rh_cpuinfo_x86 rh_boot_cpu_data __read_mostly = {
	.x86_cache_max_rmid = -1,
	.x86_cache_occ_scale = -1,
};
EXPORT_SYMBOL(rh_boot_cpu_data);
#endif
225
226
/*
 * Initial CR4 feature bits: with PAE on 32-bit, X86_CR4_PAE must be set
 * from the start; otherwise begin at zero.  The real value is read back
 * from CR4 later in setup_arch().
 */
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
unsigned long mmu_cr4_features;
#else
unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif

/* Boot loader type/version decoded from the setup header in setup_arch(). */
int bootloader_type, bootloader_version;

/* Firmware screen and EDID data, copied out of boot_params in setup_arch(). */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

/* Bit layout of boot_params.hdr.ram_size (used under CONFIG_BLK_DEV_RAM). */
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000

/* Local copy of the command line handed to the caller of setup_arch(). */
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
256
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif

/*
 * Snapshot the BIOS Enhanced Disk Drive data (MBR signatures and EDD
 * info) out of boot_params into the global 'edd', so it survives after
 * the boot_params area may be reclaimed.
 */
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif
280
281void * __init extend_brk(size_t size, size_t align)
282{
283 size_t mask = align - 1;
284 void *ret;
285
286 BUG_ON(_brk_start == 0);
287 BUG_ON(align & mask);
288
289 _brk_end = (_brk_end + mask) & ~mask;
290 BUG_ON((char *)(_brk_end + size) > __brk_limit);
291
292 ret = (void *)_brk_end;
293 _brk_end += size;
294
295 memset(ret, 0, size);
296
297 return ret;
298}
299
#ifdef CONFIG_X86_32
/*
 * No-op on 32-bit; setup_arch() calls this unconditionally, so the real
 * implementation must live in 64-bit-only code elsewhere.
 */
static void __init cleanup_highmap(void)
{
}
#endif
305
/*
 * Reserve whatever extend_brk() actually handed out, then seal the brk
 * area so further extend_brk() calls BUG().
 */
static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/* Mark brk area as locked down and no longer extendable. */
	_brk_start = 0;
}

/* Physical address the initrd was copied to by relocate_initrd(). */
u64 relocated_ramdisk;
318
319#ifdef CONFIG_BLK_DEV_INITRD
320
321static u64 __init get_ramdisk_image(void)
322{
323 u64 ramdisk_image = boot_params.hdr.ramdisk_image;
324
325 ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;
326
327 return ramdisk_image;
328}
329static u64 __init get_ramdisk_size(void)
330{
331 u64 ramdisk_size = boot_params.hdr.ramdisk_size;
332
333 ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;
334
335 return ramdisk_size;
336}
337
/* Largest span we can early_memremap() in one go (fixmap BTMAP slots). */
#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
/*
 * Copy the initrd from wherever the boot loader placed it into a newly
 * reserved area inside the already-mapped range, chunk by chunk through
 * temporary early_memremap() windows.  Panics if no room can be found.
 */
static void __init relocate_initrd(void)
{
	/* Source image location/size as reported by the boot loader. */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 area_size = PAGE_ALIGN(ramdisk_size);
	unsigned long slop, clen, mapaddr;
	char *p, *q;

	/* Find a page-aligned destination below the mapped limit. */
	relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
						   area_size, PAGE_SIZE);

	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/* Claim the destination and publish the new virtual range. */
	memblock_reserve(relocated_ramdisk, area_size);
	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	q = (char *)initrd_start;

	/* Copy in MAX_MAP_CHUNK-sized pieces; 'slop' is the sub-page offset. */
	while (ramdisk_size) {
		slop = ramdisk_image & ~PAGE_MASK;
		clen = ramdisk_size;
		if (clen > MAX_MAP_CHUNK-slop)
			clen = MAX_MAP_CHUNK-slop;
		mapaddr = ramdisk_image & PAGE_MASK;
		p = early_memremap(mapaddr, clen+slop);
		memcpy(q, p+slop, clen);
		early_iounmap(p, clen+slop);
		q += clen;
		ramdisk_image += clen;
		ramdisk_size -= clen;
	}

	/* Re-read the original location purely for the log message below. */
	ramdisk_image = get_ramdisk_image();
	ramdisk_size = get_ramdisk_size();
	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}
388
389static void __init early_reserve_initrd(void)
390{
391
392 u64 ramdisk_image = get_ramdisk_image();
393 u64 ramdisk_size = get_ramdisk_size();
394 u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
395
396 if (!boot_params.hdr.type_of_loader ||
397 !ramdisk_image || !ramdisk_size)
398 return;
399
400 memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
401}
/*
 * Decide whether the initrd can stay where the boot loader put it
 * (fully covered by the direct mapping) or must be relocated, and set
 * the final initrd_start/initrd_end virtual addresses.
 */
static void __init reserve_initrd(void)
{
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size = get_ramdisk_size();
	u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 mapped_size;

	/* No boot loader or no initrd: nothing to do. */
	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;

	initrd_start = 0;

	/* Refuse an initrd bigger than half of the mapped memory. */
	mapped_size = memblock_mem_size(max_pfn_mapped);
	if (ramdisk_size >= (mapped_size>>1))
		panic("initrd too large to handle, "
		       "disabling initrd (%lld needed, %lld available)\n",
		       ramdisk_size, mapped_size>>1);

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
			ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* Already fully mapped: use it in place. */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	/* Original location is no longer needed after the copy. */
	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
/* No initrd support configured: both hooks are no-ops. */
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif
445
/*
 * Walk the boot-loader-supplied setup_data linked list and dispatch each
 * node by type.  Each node is temporarily mapped with early_memremap();
 * the next pointer is captured before unmapping, so the walk never
 * dereferences an unmapped node.
 */
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, map_len, data_type;

		/* Map to the end of the page, but never less than one header. */
		map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
			      (u64)sizeof(struct setup_data));
		data = early_memremap(pa_data, map_len);
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_iounmap(data, map_len);

		switch (data_type) {
		case SETUP_E820_EXT:
			parse_e820_ext(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}
479
/*
 * Retype the RAM covered by setup_data nodes from E820_RAM to
 * E820_RESERVED_KERN so the data survives later e820-based allocation,
 * then re-sanitize and re-snapshot the map if anything changed.
 */
static void __init e820_reserve_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;
	int found = 0;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		e820_update_range(pa_data, sizeof(*data)+data->len,
			 E820_RAM, E820_RESERVED_KERN);
		found = 1;
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
	if (!found)
		return;

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("reserve setup_data");
}
503
/*
 * memblock-reserve every setup_data node (header + payload) so early
 * allocations cannot clobber boot-loader-provided data.
 */
static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		memblock_reserve(pa_data, sizeof(*data) + data->len);
		/* Read the next pointer before the mapping goes away. */
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
}
517
518
519
520
521
#ifdef CONFIG_KEXEC_CORE

/* Alignment for crash kernel reservations: 16 MiB. */
#define CRASH_ALIGN (16 << 20)

/*
 * Search limits for automatic crash kernel placement.  32-bit caps both
 * at 512 MiB; 64-bit allows 896 MiB for the "low" reservation and the
 * full MAXMEM range for crashkernel=...,high.
 */
#ifdef CONFIG_X86_32
# define CRASH_ADDR_LOW_MAX (512 << 20)
# define CRASH_ADDR_HIGH_MAX (512 << 20)
#else
# define CRASH_ADDR_LOW_MAX (896UL << 20)
# define CRASH_ADDR_HIGH_MAX MAXMEM
#endif
539
/*
 * On x86_64, reserve an additional crash kernel region below 4GB so the
 * kdump kernel has DMA-capable memory even when the main reservation is
 * above 4GB.  Size comes from crashkernel=...,low or a swiotlb-based
 * default.  Returns 0 on success or when no low reservation is needed.
 */
static int __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
	unsigned long long base, low_base = 0, low_size = 0;
	unsigned long total_low_mem;
	int ret;

	total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));

	/* crashkernel=Y,low */
	ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
	if (ret) {
		/*
		 * No explicit ...,low given: default to the swiotlb bounce
		 * buffer size plus 8MB slack, but at least 256MB.
		 */
		low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
	} else {
		/* crashkernel=0,low explicitly disables the low reservation. */
		if (!low_size)
			return 0;
	}

	low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
	if (!low_base) {
		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
		       (unsigned long)(low_size >> 20));
		return -ENOMEM;
	}

	ret = memblock_reserve(low_base, low_size);
	if (ret) {
		pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
		return ret;
	}

	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
		(unsigned long)(low_size >> 20),
		(unsigned long)(low_base >> 20),
		(unsigned long)(total_low_mem >> 20));

	crashk_low_res.start = low_base;
	crashk_low_res.end = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
	return 0;
}
592
593static void __init reserve_crashkernel(void)
594{
595 unsigned long long crash_size, crash_base, total_mem;
596 bool high = false;
597 int ret;
598
599 total_mem = memblock_phys_mem_size();
600
601
602 ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
603 if (ret != 0 || crash_size <= 0) {
604
605 ret = parse_crashkernel_high(boot_command_line, total_mem,
606 &crash_size, &crash_base);
607 if (ret != 0 || crash_size <= 0)
608 return;
609 high = true;
610 }
611
612
613 if (crash_base <= 0) {
614
615
616
617 crash_base = memblock_find_in_range(CRASH_ALIGN,
618 high ? CRASH_ADDR_HIGH_MAX
619 : CRASH_ADDR_LOW_MAX,
620 crash_size, CRASH_ALIGN);
621#ifdef CONFIG_X86_64
622
623
624
625 if (!high && !crash_base)
626 crash_base = memblock_find_in_range(CRASH_ALIGN,
627 (1ULL << 32),
628 crash_size, CRASH_ALIGN);
629
630
631
632 if (!high && !crash_base)
633 crash_base = memblock_find_in_range(CRASH_ALIGN,
634 CRASH_ADDR_HIGH_MAX,
635 crash_size, CRASH_ALIGN);
636#endif
637 if (!crash_base) {
638 pr_info("crashkernel reservation failed - No suitable area found.\n");
639 return;
640 }
641
642 } else {
643 unsigned long long start;
644
645 start = memblock_find_in_range(crash_base,
646 crash_base + crash_size,
647 crash_size, 1 << 20);
648 if (start != crash_base) {
649 pr_info("crashkernel reservation failed - memory is in use.\n");
650 return;
651 }
652 }
653 ret = memblock_reserve(crash_base, crash_size);
654 if (ret) {
655 pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
656 return;
657 }
658
659 if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
660 memblock_free(crash_base, crash_size);
661 return;
662 }
663
664 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
665 (unsigned long)(crash_size >> 20),
666 (unsigned long)(crash_base >> 20),
667 (unsigned long)(total_mem >> 20));
668
669 crashk_res.start = crash_base;
670 crashk_res.end = crash_base + crash_size - 1;
671 insert_resource(&iomem_resource, &crashk_res);
672}
#else
/* kexec not configured: crash kernel reservation is a no-op. */
static void __init reserve_crashkernel(void)
{
}
#endif
678
/* Fixed legacy PC I/O port ranges claimed at boot (DMA, PIC, PIT, KBC, FPU). */
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
701
702void __init reserve_standard_io_resources(void)
703{
704 int i;
705
706
707 for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
708 request_resource(&ioport_resource, &standard_io_resources[i]);
709
710}
711
712static __init void reserve_ibft_region(void)
713{
714 unsigned long addr, size = 0;
715
716 addr = find_ibft_region(&size);
717
718 if (size)
719 memblock_reserve(addr, size);
720}
721
/*
 * Detect whether the Sandy Bridge graphics device (PCI 00:02.0) is one
 * of the device IDs needing the stolen-memory workaround applied by
 * trim_snb_memory().
 */
static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Cannot probe the device without early PCI config access. */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)	/* Intel */
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}
753
754
755
756
757
/*
 * On affected Sandy Bridge graphics systems, reserve specific physical
 * pages (and all memory below 1MB) that the GPU cannot safely access,
 * so they are never used for graphics buffers.
 */
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/* Keep everything below 1MB out of circulation. */
	memblock_reserve(0, 1<<20);

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}
786
787
788
789
790
791
792
793
/* Apply all platform-specific memory trims (currently only SNB gfx). */
static void __init trim_platform_memory_ranges(void)
{
	trim_snb_memory();
}
798
/*
 * Remove BIOS-owned regions from the usable-RAM picture before the e820
 * map is handed to the rest of the kernel.
 */
static void __init trim_bios_range(void)
{
	/*
	 * Mark the first page reserved: it conventionally holds BIOS data
	 * that must not be reused even if firmware reports it as RAM.
	 */
	e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);

	/*
	 * Drop any RAM claims inside the legacy BIOS ROM region; that
	 * space is never usable as normal memory.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
821
822
/*
 * Ensure the entire kernel image (_text.._end) is typed E820_RAM.  If
 * firmware marked any part of it otherwise, force the whole range back
 * to RAM so later pfn-based logic treats the image consistently.
 */
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/* Fast path: the whole image is already RAM. */
	if (e820_all_mapped(start, start + size, E820_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_RAM!\n");
	e820_remove_range(start, size, E820_RAM, 0);
	e820_add_region(start, size, E820_RAM);
}
842
/* Bytes of low memory to reserve; CONFIG_X86_RESERVE_LOW is in KiB. */
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
844
845static int __init parse_reservelow(char *p)
846{
847 unsigned long long size;
848
849 if (!p)
850 return -EINVAL;
851
852 size = memparse(p, &p);
853
854 if (size < 4096)
855 size = 4096;
856
857 if (size > 640*1024)
858 size = 640*1024;
859
860 reserve_low = size;
861
862 return 0;
863}
864
865early_param("reservelow", parse_reservelow);
866
/* Reserve the first reserve_low bytes of physical memory (page-aligned). */
static void __init trim_low_memory_range(void)
{
	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}
871
/*
 * RHEL support gate for AMD CPUs: all families before 17h (Zen) are
 * accepted; for family 17h only "AMD EPYC 7xxx" parts are validated.
 */
static bool valid_amd_processor(__u8 family, const char *model_id)
{
	if (family < 0x17)
		return true;

	return strstr(model_id, "AMD EPYC 7") != NULL;
}
883
/*
 * RHEL support gate for Intel family 6 CPUs: return true when the given
 * model/stepping combination is on the validated list.
 */
static bool valid_intel_processor(__u8 model, __u8 stepping)
{
	bool valid;

	switch(model) {
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		/* steppings up to 10, plus stepping 12 */
		valid = (stepping <= 10 || stepping == 12);
		break;

	case INTEL_FAM6_KABYLAKE_MOBILE:
		valid = (stepping <= 11);
		break;

	/* These models are validated at any stepping. */
	case INTEL_FAM6_XEON_PHI_KNM:
	case INTEL_FAM6_ATOM_GEMINI_LAKE:
	case INTEL_FAM6_ATOM_DENVERTON:
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_X:
	case INTEL_FAM6_ATOM_SILVERMONT2:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_HASWELL_ULT:
		valid = true;
		break;

	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
		valid = (stepping <= 4);
		break;

	case INTEL_FAM6_SKYLAKE_X:
		valid = (stepping <= 5);
		break;

	default:
		/* Anything at or below Haswell-X by model number is accepted. */
		valid = (model <= INTEL_FAM6_HASWELL_X);
		break;
	}

	return valid;
}
927
/*
 * RHEL-specific hardware support checks: log critical warnings and mark
 * hardware unsupported for configurations outside the support matrix.
 */
static void rh_check_supported(void)
{
	/* Single-threaded, single-CPU bare metal is unsupported in RHEL7. */
	if (((boot_cpu_data.x86_max_cores * smp_num_siblings) == 1) &&
	    !x86_hyper && !cpu_has_hypervisor && !is_kdump_kernel()) {
		pr_crit("Detected single cpu native boot.\n");
		pr_crit("Important: In Red Hat Enterprise Linux 7, single threaded, single CPU 64-bit physical systems are unsupported by Red Hat. Please contact your Red Hat support representative for a list of certified and supported systems.");
	}

	/* Only Intel and AMD processors are supported at all. */
	if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) &&
	    (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)) {
		pr_crit("Detected processor %s %s\n",
			boot_cpu_data.x86_vendor_id,
			boot_cpu_data.x86_model_id);
		mark_hardware_unsupported("Processor");
	}

	/* AMD: gate on family / EPYC model string. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (!valid_amd_processor(boot_cpu_data.x86,
					 boot_cpu_data.x86_model_id)) {
			pr_crit("Detected CPU family %xh model %d\n",
				boot_cpu_data.x86,
				boot_cpu_data.x86_model);
			mark_hardware_unsupported("AMD Processor");
		}
	}

	/* Intel family 6: gate on the validated model/stepping list. */
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    ((boot_cpu_data.x86 == 6))) {
		if (!valid_intel_processor(boot_cpu_data.x86_model,
					   boot_cpu_data.x86_mask)) {
			pr_crit("Detected CPU family %d model %d stepping %d\n",
				boot_cpu_data.x86,
				boot_cpu_data.x86_model,
				boot_cpu_data.x86_mask);
			mark_hardware_unsupported("Intel Processor");
		}
	}

	/*
	 * Warn when ACPI is off on bare metal: per the message below this
	 * commonly breaks SMP bring-up and PCI IRQ routing.
	 */
	if (acpi_disabled && !x86_hyper && !cpu_has_hypervisor)
		pr_crit("ACPI has been disabled or is not available on this hardware. This may result in a single cpu boot, incorrect PCI IRQ routing, or boot failure.\n");
}
981
982
983
984
985static int
986dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
987{
988 if (kaslr_enabled()) {
989 pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
990 kaslr_offset(),
991 __START_KERNEL,
992 __START_KERNEL_map,
993 MODULES_VADDR-1);
994 } else {
995 pr_emerg("Kernel Offset: disabled\n");
996 }
997
998 return 0;
999}
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
/*
 * Architecture-specific boot-time setup for x86.  Runs once on the boot
 * CPU, strictly ordered: memory protection of the kernel image and
 * initrd, command line handling, e820/memblock construction, crash
 * kernel and firmware reservations, paging init, and finally ACPI/SMP
 * and platform bring-up.  On return, *cmdline_p points at the command
 * line copy for the generic kernel.
 */
void __init setup_arch(char **cmdline_p)
{
	/* Protect the kernel image itself before any allocation happens. */
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__bss_stop - (unsigned long)_text);

	/* Keep page 0 out of the allocator. */
	memblock_reserve(0, PAGE_SIZE);

	early_reserve_initrd();

#ifdef CONFIG_X86_32
	/* Take over the CPU data detected by early asm boot code. */
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
	visws_early_detect();

	/* Copy kernel address range into the real page tables and switch. */
	clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif

	olpc_ofw_detect();

	/* Early exception handling, CPU ident, and fixmap-based ioremap. */
	early_trap_init();
	early_cpu_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	/* Pull assorted firmware/bootloader data out of boot_params. */
	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
	if (boot_params.sys_desc_table.length != 0) {
		machine_id = boot_params.sys_desc_table.table[0];
		machine_submodel_id = boot_params.sys_desc_table.table[1];
		BIOS_revision = boot_params.sys_desc_table.table[2];
	}
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		/* Extended loader type encoding. */
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	/* Detect a 32- or 64-bit EFI loader from its signature. */
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL32", 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();
#endif

	x86_init.oem.arch_setup();

	/* Build the e820 memory map and process boot-loader setup_data. */
	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	setup_memory_map();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	mpx_mm_init(&init_mm);

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	data_resource.start = __pa_symbol(_etext);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* Append boot loader cmdline to the builtin one. */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/* NX must be configured before parse_early_param() consumers run. */
	x86_configure_nx();

	parse_early_param();

	x86_report_nx();

	/* Protect setup_data after "memmap=" may have altered the map. */
	memblock_x86_reserve_range_setup_data();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

#ifdef CONFIG_PCI
	if (pci_early_dump_regs)
		early_dump_pci_devices();
#endif

	e820_reserve_setup_data();
	finish_e820_parsing();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	dmi_scan_machine();
	dmi_memdev_walk();
	dmi_set_dump_stack_arch_desc();

	init_hypervisor_platform();

	x86_init.resources.probe_roms();

	/* Publish the kernel image sections in /proc/iomem. */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/* Determine the end of RAM, possibly trimmed by MTRR coverage. */
	max_pfn = e820_end_of_ram_pfn();

	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();

	max_possible_pfn = max_pfn;

	kernel_randomize_memory();

#ifdef CONFIG_X86_32
	find_low_pfn_range();
#else
	num_physpages = max_pfn;

	check_x2apic();

	/* max_low_pfn: end of directly addressable low (<4G) memory. */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	find_smp_config();

	reserve_ibft_region();

	early_alloc_pgt_buf();

	/* Must follow early page-table allocation, which uses extend_brk(). */
	reserve_brk();

	cleanup_highmap();

	/* Populate memblock from e820, limited to ISA space for now. */
	memblock_set_current_limit(ISA_END_ADDRESS);
	memblock_x86_fill();

	if (efi_enabled(EFI_BOOT))
		efi_find_mirror();

	if (efi_enabled(EFI_MEMMAP))
		efi_reserve_boot_services();

	early_reserve_e820_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
	       (max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	reserve_real_mode();

	trim_platform_memory_ranges();
	trim_low_memory_range();

	/* Build the full direct mapping; then allow higher allocations. */
	init_mem_mapping();

	early_trap_pf_init();

	setup_real_mode();

	memblock_set_current_limit(get_max_mapped());
	dma_contiguous_reserve(0);

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif

	setup_log_buf(1);

#ifdef CONFIG_EFI_SECURE_BOOT_SECURELEVEL
	if (boot_params.secure_boot) {
		set_bit(EFI_SECURE_BOOT, &efi.flags);
		set_securelevel(1);
		pr_info("Secure boot enabled\n");
	}
#endif

	reserve_initrd();

#if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
	acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
#endif

	vsmp_init();

	io_delay_init();

	/* ACPI table discovery, NUMA init, then crash kernel reservation. */
	acpi_boot_table_init();

	early_acpi_boot_init();

	initmem_init();

	reserve_crashkernel();

	memblock_find_dma_reserve();

#ifdef CONFIG_KVM_GUEST
	kvmclock_init();
#endif

	x86_init.paging.pagetable_init();

	if (boot_cpu_data.cpuid_level >= 0) {
		/* Record CR4 as configured so far; mirror to the trampoline. */
		mmu_cr4_features = read_cr4();
		if (trampoline_cr4_features)
			*trampoline_cr4_features = mmu_cr4_features;
	}

#ifdef CONFIG_X86_32
	/* Sync back kernel address range into the boot page table. */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
#endif

	tboot_probe();

#ifdef CONFIG_X86_64
	map_vsyscall();
#endif

	generic_apic_probe();

	early_quirks();

	/* Full ACPI/SFI/devicetree parsing, then SMP/APIC configuration. */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();

	if (smp_found_config)
		get_smp_config();

	init_apic_mappings();

	prefill_possible_map();

	init_cpu_to_node();

	if (x86_io_apic_ops.init)
		x86_io_apic_ops.init();

	kvm_guest_init();

	/* Finalize e820-derived resources and the PCI gap. */
	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	x86_init.resources.reserve_resources();

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	arch_init_ideal_nops();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif

	/* RHEL support-matrix checks run last, with CPU data finalized. */
	rh_check_supported();
}
1439
#ifdef CONFIG_X86_32

/* Legacy VGA video RAM window. */
static struct resource video_ram_resource = {
	.name = "Video RAM area",
	.start = 0xa0000,
	.end = 0xbffff,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

/* Claim video RAM and the standard legacy I/O ports on 32-bit. */
void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif
1456
1457void arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
1458{
1459 if (!boot_cpu_has(X86_FEATURE_OSPKE))
1460 return;
1461
1462 seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
1463}
1464
/* Panic notifier that reports the KASLR slide (see dump_kernel_offset). */
static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

/* Hook the KASLR offset dumper onto the panic notifier chain at boot. */
static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
					&kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);
1476