1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#define BOOT_CTYPE_H
21
22
23
24
25
26
27
28
29#define _LINUX_EXPORT_H
30#define EXPORT_SYMBOL(sym)
31
32#include "misc.h"
33#include "error.h"
34#include "../string.h"
35
36#include <generated/compile.h>
37#include <linux/module.h>
38#include <linux/uts.h>
39#include <linux/utsname.h>
40#include <linux/ctype.h>
41#include <linux/efi.h>
42#include <generated/utsrelease.h>
43#include <asm/efi.h>
44
45
46#define STATIC
47#include <linux/decompress/mm.h>
48
49extern unsigned long get_cmd_line_ptr(void);
50
51
/* Per-build version string, mixed into the KASLR entropy seed below. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
54
/*
 * Rotate @hash right by 7 bits, then XOR in each machine word of @area.
 * Trailing bytes that do not fill a whole word are ignored.
 */
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	const unsigned long *words = area;
	size_t nwords = size / sizeof(hash);
	size_t idx;

	for (idx = 0; idx < nwords; idx++) {
		/* Rotate right by 7 == rotate left by (width - 7). */
		hash = (hash >> 7) | (hash << ((sizeof(hash) * 8) - 7));
		hash ^= words[idx];
	}

	return hash;
}
69
70
71static unsigned long get_boot_seed(void)
72{
73 unsigned long hash = 0;
74
75 hash = rotate_xor(hash, build_str, sizeof(build_str));
76 hash = rotate_xor(hash, boot_params, sizeof(*boot_params));
77
78 return hash;
79}
80
81#define KASLR_COMPRESSED_BOOT
82#include "../../lib/kaslr.c"
83
/* A simple [start, start + size) range of physical memory. */
struct mem_vector {
	unsigned long long start;
	unsigned long long size;
};

/* Only this many user-supplied memmap= regions are honored. */
#define MAX_MEMMAP_REGIONS 4

/* Set when more than MAX_MEMMAP_REGIONS memmap= regions were supplied;
 * KASLR is then aborted (see find_random_phys_addr()). */
static bool memmap_too_large;

/* Upper address bound imposed by mem=nn[KMG] / memmap=nn[KMG] parsing. */
unsigned long long mem_limit = ULLONG_MAX;
97
98
/* Indices into mem_avoid[] for ranges the randomized kernel must not hit. */
enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MEMMAP_BEGIN,
	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
	MEM_AVOID_MAX,
};

/* The ranges to avoid; filled in by mem_avoid_init() and mem_avoid_memmap(). */
static struct mem_vector mem_avoid[MEM_AVOID_MAX];
110
111static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
112{
113
114 if (one->start + one->size <= two->start)
115 return false;
116
117 if (one->start >= two->start + two->size)
118 return false;
119 return true;
120}
121
/* Return a pointer to the first non-whitespace character in @str. */
char *skip_spaces(const char *str)
{
	/* Walk past leading whitespace; cast away const to match callers. */
	for (; isspace(*str); str++)
		;
	return (char *)str;
}
128#include "../../../../lib/ctype.c"
129#include "../../../../lib/cmdline.c"
130
/*
 * Parse one memmap= region descriptor of the form nn[KMG][@#$!]ss[KMG].
 * On success returns 0 with *start/*size filled in; returns -EINVAL for
 * NULL input, "exactmap", or an unparseable size.
 */
static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
{
	char *oldp;

	if (!p)
		return -EINVAL;

	/* We don't care about this option here */
	if (!strncmp(p, "exactmap", 8))
		return -EINVAL;

	oldp = p;
	*size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
		/* Reserved/ACPI/unusable region: avoid [start, start+size). */
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		/* memmap=nn@ss specifies usable region, nothing to avoid. */
		*size = 0;
		/* Fall through */
	default:
		/*
		 * If no offset was given, only a size: memmap=nn[KMG] acts
		 * like mem=nn[KMG] — it caps the highest usable address.
		 * Signal that to the caller with start == 0.
		 */
		*start = 0;
		return 0;
	}

	/* Unreachable: every switch arm above returns. */
	return -EINVAL;
}
170
/*
 * Parse a memmap= option value (comma-separated region descriptors) and
 * record each reserved region in mem_avoid[].  'i' is static so repeated
 * memmap= options accumulate across calls.
 */
static void mem_avoid_memmap(char *str)
{
	static int i;

	if (i >= MAX_MEMMAP_REGIONS)
		return;

	while (str && (i < MAX_MEMMAP_REGIONS)) {
		int rc;
		unsigned long long start, size;
		char *k = strchr(str, ',');

		/* NUL-terminate this descriptor, remember the next one. */
		if (k)
			*k++ = 0;

		rc = parse_memmap(str, &start, &size);
		if (rc < 0)
			break;
		str = k;

		if (start == 0) {
			/* memmap=nn[KMG] without offset acts as a mem= limit. */
			if (size > 0)
				mem_limit = size;

			continue;
		}

		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
		i++;
	}

	/* More regions than we can track: disable KASLR entirely. */
	if ((i >= MAX_MEMMAP_REGIONS) && str)
		memmap_too_large = true;
}
208
209static int handle_mem_memmap(void)
210{
211 char *args = (char *)get_cmd_line_ptr();
212 size_t len = strlen((char *)args);
213 char *tmp_cmdline;
214 char *param, *val;
215 u64 mem_size;
216
217 if (!strstr(args, "memmap=") && !strstr(args, "mem="))
218 return 0;
219
220 tmp_cmdline = malloc(len + 1);
221 if (!tmp_cmdline)
222 error("Failed to allocate space for tmp_cmdline");
223
224 memcpy(tmp_cmdline, args, len);
225 tmp_cmdline[len] = 0;
226 args = tmp_cmdline;
227
228
229 args = skip_spaces(args);
230
231 while (*args) {
232 args = next_arg(args, ¶m, &val);
233
234 if (!val && strcmp(param, "--") == 0) {
235 warn("Only '--' specified in cmdline");
236 free(tmp_cmdline);
237 return -1;
238 }
239
240 if (!strcmp(param, "memmap")) {
241 mem_avoid_memmap(val);
242 } else if (!strcmp(param, "mem")) {
243 char *p = val;
244
245 if (!strcmp(p, "nopentium"))
246 continue;
247 mem_size = memparse(p, &p);
248 if (mem_size == 0) {
249 free(tmp_cmdline);
250 return -EINVAL;
251 }
252 mem_limit = mem_size;
253 }
254 }
255
256 free(tmp_cmdline);
257 return 0;
258}
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
/*
 * Populate mem_avoid[] with the ranges the randomized kernel must not
 * overwrite: the in-flight decompression range, the initrd, the command
 * line, and boot_params.  Ranges the decompressor itself will touch are
 * also added to the identity page tables.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during decompression:
	 * from the start of the compressed image (input) through the end
	 * of the space the decompressed kernel needs (output + init_size).
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
			 mem_avoid[MEM_AVOID_ZO_RANGE].size);

	/* Avoid initrd: 64-bit address/size are split across ext_* fields. */
	initrd_start = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No identity mapping here — presumably the decompressor never
	 * touches the initrd; NOTE(review): confirm. */

	/* Avoid the kernel command line (64-bit pointer split as above). */
	cmd_line = (u64)boot_params->ext_cmd_line_ptr << 32;
	cmd_line |= boot_params->hdr.cmd_line_ptr;
	/* Measure the string length: count includes the trailing NUL. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++];)
		;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
			 mem_avoid[MEM_AVOID_CMDLINE].size);

	/* Avoid the boot_params structure itself. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);

	/* Parse mem= / memmap= to fill the remaining mem_avoid[] slots
	 * and possibly cap mem_limit. */
	handle_mem_memmap();

#ifdef CONFIG_X86_VERBOSE_BOOTUP
	/* Map low memory so early console output can be written. */
	add_identity_map(0, PMD_SIZE);
#endif
}
388
389
390
391
392
/*
 * Does @img overlap any avoided range (mem_avoid[] or the setup_data
 * linked list)?  If so, *overlap is set to the overlapping range with
 * the lowest start address, so the caller can trim @img just below it.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	/* Track the lowest overlap start seen; init to just past img. */
	unsigned long earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Also walk the setup_data chain: those blobs must survive too. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}
429
/* A contiguous area providing 'num' aligned candidate load slots. */
struct slot_area {
	unsigned long addr;
	int num;
};

#define MAX_SLOT_AREA 100

/* Candidate areas gathered from the EFI/e820 memory maps. */
static struct slot_area slot_areas[MAX_SLOT_AREA];

/* Total number of candidate slots across all recorded areas. */
static unsigned long slot_max;

/* Number of entries currently used in slot_areas[]. */
static unsigned long slot_area_index;
442
/*
 * Record @region as a slot area: each CONFIG_PHYSICAL_ALIGN step at which
 * an image of @image_size still fits counts as one slot.
 * Assumes region->size >= image_size (the subtraction would otherwise
 * wrap) — callers are expected to guarantee this; TODO confirm.
 */
static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	/* Silently drop regions once the area table is full. */
	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) /
			CONFIG_PHYSICAL_ALIGN + 1;

	if (slot_area.num > 0) {
		slot_areas[slot_area_index++] = slot_area;
		slot_max += slot_area.num;
	}
}
459
/*
 * Choose one slot uniformly at random across all slot areas and convert
 * it to a physical address.  Returns 0 when no slots were recorded.
 */
static unsigned long slots_fetch_random(void)
{
	unsigned long slot;
	int i;

	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	slot = kaslr_get_random_long("Physical") % slot_max;

	/* Walk the areas, reducing the global index to a local one. */
	for (i = 0; i < slot_area_index; i++) {
		if (slot >= slot_areas[i].num) {
			slot -= slot_areas[i].num;
			continue;
		}
		return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
	}

	/* Unreachable if slot_max is consistent with the areas above. */
	if (i == slot_area_index)
		debug_putstr("slots_fetch_random() failed!?\n");
	return 0;
}
483
484static void process_mem_region(struct mem_vector *entry,
485 unsigned long minimum,
486 unsigned long image_size)
487{
488 struct mem_vector region, overlap;
489 struct slot_area slot_area;
490 unsigned long start_orig, end;
491 struct mem_vector cur_entry;
492
493
494 if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
495 return;
496
497
498 if (entry->start + entry->size < minimum)
499 return;
500
501
502 end = min(entry->size + entry->start, mem_limit);
503 if (entry->start >= end)
504 return;
505 cur_entry.start = entry->start;
506 cur_entry.size = end - entry->start;
507
508 region.start = cur_entry.start;
509 region.size = cur_entry.size;
510
511
512 while (slot_area_index < MAX_SLOT_AREA) {
513 start_orig = region.start;
514
515
516 if (region.start < minimum)
517 region.start = minimum;
518
519
520 region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
521
522
523 if (region.start > cur_entry.start + cur_entry.size)
524 return;
525
526
527 region.size -= region.start - start_orig;
528
529
530 if (IS_ENABLED(CONFIG_X86_32) &&
531 region.start + region.size > KERNEL_IMAGE_SIZE)
532 region.size = KERNEL_IMAGE_SIZE - region.start;
533
534
535 if (region.size < image_size)
536 return;
537
538
539 if (!mem_avoid_overlap(®ion, &overlap)) {
540 store_slot_info(®ion, image_size);
541 return;
542 }
543
544
545 if (overlap.start > region.start + image_size) {
546 struct mem_vector beginning;
547
548 beginning.start = region.start;
549 beginning.size = overlap.start - region.start;
550 store_slot_info(&beginning, image_size);
551 }
552
553
554 if (overlap.start + overlap.size >= region.start + region.size)
555 return;
556
557
558 region.size -= overlap.start - region.start + overlap.size;
559 region.start = overlap.start + overlap.size;
560 }
561}
562
563#ifdef CONFIG_EFI
564
565
566
567
568static bool
569process_efi_entries(unsigned long minimum, unsigned long image_size)
570{
571 struct efi_info *e = &boot_params->efi_info;
572 bool efi_mirror_found = false;
573 struct mem_vector region;
574 efi_memory_desc_t *md;
575 unsigned long pmap;
576 char *signature;
577 u32 nr_desc;
578 int i;
579
580 signature = (char *)&e->efi_loader_signature;
581 if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
582 strncmp(signature, EFI64_LOADER_SIGNATURE, 4))
583 return false;
584
585#ifdef CONFIG_X86_32
586
587 if (e->efi_memmap_hi) {
588 warn("EFI memmap is above 4GB, can't be handled now on x86_32. EFI should be disabled.\n");
589 return false;
590 }
591 pmap = e->efi_memmap;
592#else
593 pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
594#endif
595
596 nr_desc = e->efi_memmap_size / e->efi_memdesc_size;
597 for (i = 0; i < nr_desc; i++) {
598 md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
599 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
600 efi_mirror_found = true;
601 break;
602 }
603 }
604
605 for (i = 0; i < nr_desc; i++) {
606 md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
607
608
609
610
611
612
613
614
615
616
617
618
619 if (md->type != EFI_CONVENTIONAL_MEMORY)
620 continue;
621
622 if (efi_mirror_found &&
623 !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
624 continue;
625
626 region.start = md->phys_addr;
627 region.size = md->num_pages << EFI_PAGE_SHIFT;
628 process_mem_region(®ion, minimum, image_size);
629 if (slot_area_index == MAX_SLOT_AREA) {
630 debug_putstr("Aborted EFI scan (slot_areas full)!\n");
631 break;
632 }
633 }
634 return true;
635}
636#else
/* Stub when CONFIG_EFI is off: report "no EFI map" so e820 is scanned. */
static inline bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
	return false;
}
642#endif
643
644static void process_e820_entries(unsigned long minimum,
645 unsigned long image_size)
646{
647 int i;
648 struct mem_vector region;
649 struct boot_e820_entry *entry;
650
651
652 for (i = 0; i < boot_params->e820_entries; i++) {
653 entry = &boot_params->e820_table[i];
654
655 if (entry->type != E820_TYPE_RAM)
656 continue;
657 region.start = entry->addr;
658 region.size = entry->size;
659 process_mem_region(®ion, minimum, image_size);
660 if (slot_area_index == MAX_SLOT_AREA) {
661 debug_putstr("Aborted e820 scan (slot_areas full)!\n");
662 break;
663 }
664 }
665}
666
/*
 * Pick a random physical load address at or above @minimum, preferring
 * the EFI memory map and falling back to e820.  Returns 0 on failure.
 */
static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	/* Bail if the user supplied more memmap= regions than we track. */
	if (memmap_too_large) {
		debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
		return 0;
	}

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	if (process_efi_entries(minimum, image_size))
		return slots_fetch_random();

	process_e820_entries(minimum, image_size);
	return slots_fetch_random();
}
685
/*
 * Pick a random virtual load address at or above @minimum, aligned to
 * CONFIG_PHYSICAL_ALIGN and fitting below KERNEL_IMAGE_SIZE.
 */
static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
	/* Align image_size for easy slot calculations. */
	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

	/*
	 * Number of CONFIG_PHYSICAL_ALIGN-sized positions between minimum
	 * and KERNEL_IMAGE_SIZE at which image_size still fits.
	 */
	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
		CONFIG_PHYSICAL_ALIGN + 1;

	random_addr = kaslr_get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
708
709
710
711
712
/*
 * KASLR entry point: pick a random physical load address (written to
 * *output) and, on x86_64, a random virtual address (written to
 * *virt_addr).  Leaves both untouched when "nokaslr" is on the cmdline.
 */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr)
{
	unsigned long random_addr, min_addr;

	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		return;
	}

	/* Record that KASLR ran so the kernel proper can report it. */
	boot_params->hdr.loadflags |= KASLR_FLAG;

	/* Prepare to add new identity pagetables on demand. */
	initialize_identity_maps();

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, *output);

	/*
	 * Low end of the randomization range: the smaller of the current
	 * output location and 512MiB.  NOTE(review): the 512MiB cap is
	 * presumably tied to what early page tables cover — confirm.
	 */
	min_addr = min(*output, 512UL << 20);

	/* Walk available memory entries to find a random address. */
	random_addr = find_random_phys_addr(min_addr, output_size);
	if (!random_addr) {
		warn("Physical KASLR disabled: no suitable memory region!");
	} else {
		/* Update the new physical address location. */
		if (*output != random_addr) {
			add_identity_map(random_addr, output_size);
			*output = random_addr;
		}

		/*
		 * Flush page tables now that the chosen target is mapped;
		 * done only on success since init only ran after
		 * mem_avoid_init() above.
		 */
		finalize_identity_maps();
	}

	/*
	 * Only x86_64 randomizes the virtual address separately; otherwise
	 * *virt_addr receives the physical result (or 0 on failure).
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
	*virt_addr = random_addr;
}
768