#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/pti.h>
#include <asm/text-patching.h>
#include <asm/memtype.h>
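/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */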
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"

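/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as minimal supported mode;
 * WC and WT fall back to UC-.  pat_init() updates these values to support
 * more cache modes, WC and WT, when it is safe to do so.
 *
 * Index into __cachemode2pte_tbl[] is the cachemode.
 *
 * Index into __pte2cachemode_tbl[] are the caching attribute bits of the
 * pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
 */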
static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
};

unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}
EXPORT_SYMBOL(cachemode2protval);

static uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
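/*
 * Check that the write-protect PAT entry is set for write-protect.
 * Note: this doesn't check whether PAT is enabled at all - the CPU might
 * support PAT but it could be disabled, e.g. via "nopat".
 */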
bool x86_has_pat_wp(void)
{
	uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];

	/*
	 * __pte2cachemode_tbl[] is indexed by pte cache bits, not by a
	 * page_cache_mode value, so translate WP to its pte encoding first.
	 */
	return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
}

enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;
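/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */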
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	/* Once the buddy allocator is up, use it directly: */
	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
	}

	/* Fall back to memblock when the brk page-table buffer is exhausted: */
	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret = 0;

		if (min_pfn_mapped < max_pfn_mapped) {
			ret = memblock_phys_alloc_range(
					PAGE_SIZE * num, PAGE_SIZE,
					min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT);
		}
		if (!ret && can_use_brk_pgt)
			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));

		if (!ret)
			panic("alloc_low_pages: can not alloc memory");

		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}
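/*
 * By default we need to be able to allocate page tables below the PGD
 * level, firstly for the 0-ISA_END_ADDRESS range and secondly for the
 * initial PMD_SIZE mapping. With KASLR memory randomization, depending
 * on the machine e820 memory map and the PUD alignment, twice that many
 * pages may be needed.
 */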
#ifndef CONFIG_X86_5LEVEL
#define INIT_PGD_PAGE_TABLES	3
#else
#define INIT_PGD_PAGE_TABLES	4
#endif

#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT	(2 * INIT_PGD_PAGE_TABLES)
#else
#define INIT_PGD_PAGE_COUNT	(4 * INIT_PGD_PAGE_TABLES)
#endif

#define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;
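/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. Invoked on the boot CPU.
 */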
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static void __init probe_page_size_mask(void)
{
	/*
	 * For pagealloc debugging, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
		page_size_mask |= 1 << PG_LEVEL_2M;
	else
		direct_gbpages = 0;

	/* Enable PSE if available: */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available: */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/* By default, everything that is supported: */
	__default_kernel_pte_mask = __supported_pte_mask;
	/* Except when with PTI, where the kernel is mostly non-Global: */
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		__default_kernel_pte_mask &= ~_PAGE_GLOBAL;

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

static void setup_pcid(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	if (!boot_cpu_has(X86_FEATURE_PCID))
		return;

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		/*
		 * This can't be cr4_set_bits_and_update_boot() -- the
		 * trampoline code can't handle CR4.PCIDE and it wouldn't
		 * do any good anyway.  Despite the name,
		 * cr4_set_bits_and_update_boot() doesn't actually cause
		 * the bits in question to remain set all the way through
		 * the secondary boot asm.
		 *
		 * Instead, we brute-force it and set CR4.PCIDE manually in
		 * start_secondary().
		 */
		cr4_set_bits(X86_CR4_PCIDE);

		/*
		 * INVPCID's single-context modes (2/3) only work if we set
		 * X86_CR4_PCIDE, *and* we have INVPCID support.  They're
		 * unusable on systems that have X86_CR4_PCIDE clear, or
		 * that have no INVPCID support at all.
		 */
		if (boot_cpu_has(X86_FEATURE_INVPCID))
			setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
	} else {
		/*
		 * flush_tlb_all(), as currently implemented, won't work if
		 * PCID is on but PGE is not.  Since that combination
		 * doesn't exist on real hardware, there's no reason to try
		 * to fully support it, but it's polite to avoid corrupting
		 * data if we're on an improperly configured VM.
		 */
		setup_clear_cpu_cap(X86_FEATURE_PCID);
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn << PAGE_SHIFT;
		mr[nr_range].end   = end_pfn << PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}
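/*
 * Adjust the page_size_mask for small ranges to use a big page size
 * instead of small ones if nearby memory is RAM too.
 */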
static void __ref adjust_range_page_size_mask(struct map_range *mr,
					      int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;

	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head if not big page alignment ? */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail is not big page (1G) alignment */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail is not big page (2M) alignment */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge same page size and continuous */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
			 mr[i].start, mr[i].end - 1,
			 page_size_string(&mr[i]));

	return nr_range;
}

struct range pfn_mapped[E820_MAX_ENTRIES];
int nr_pfn_mapped;

static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}
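/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */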
unsigned long __ref init_memory_mapping(unsigned long start,
					unsigned long end, pgprot_t prot)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
		 start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask,
						   prot);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}
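/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_TYPE_RAM and E820_TYPE_RESERVED_KERN regions. We cannot
 * simply create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 */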
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * if it is overlapping with brk pgt, we need to
		 * alloc pgt buf from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end, PAGE_KERNEL);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Initial mapped size is PMD_SIZE (2M).
	 * We can not set step_size to be PUD_SIZE (1G) yet.
	 * In worse case, when we cross the 1G boundary, and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map 1G range with PTE. Hence we use one less than the
	 * difference of page table level shifts.
	 *
	 * Don't need to worry about overflow in the top-down case, on 32bit,
	 * when step_size is 0, round_down() returns 0 for start, and that
	 * turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
	 * needs to be taken into consideration by the code below.
	 */
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}
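/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in top-down. That said, the page tables
 * will be allocated at the end of the memory, and we map the
 * memory in top-down.
 */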
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/*
	 * Systems that have many reserved areas near top of the memory,
	 * e.g. QEMU with less than 1G RAM and EFI enabled, or Xen, will
	 * require lots of 4K mappings which may exhaust pgt_buf.
	 * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure
	 * there is enough mapped memory that can be used in new low memory
	 * page tables.
	 */
	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
					 map_end);
	memblock_free(addr, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so pgt_buf from BRK could cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_phys_alloc_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (last_start > map_start) {
		unsigned long start;

		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}
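/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in bottom-up. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * in [map_start, map_end) in bottom-up.
 */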
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so pgt_buf from BRK could cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_phys_alloc_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}
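/*
 * The real mode trampoline, which is required for bootstrapping CPUs,
 * occupies only a small area under the low 1MB. See reserve_real_mode()
 * for details.
 *
 * If KASLR is disabled the first PGD entry of the direct mapping is copied
 * to map the real mode trampoline.
 *
 * If KASLR is enabled, copy only the PUD which covers the low 1MB
 * area. This limits the randomization granularity to 1GB for both 4-level
 * and 5-level paging.
 */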
static void __init init_trampoline(void)
{
#ifdef CONFIG_X86_64
	if (!kaslr_memory_enabled())
		trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
	else
		init_trampoline_kaslr();
#endif
}

void __init init_mem_mapping(void)
{
	unsigned long end;

	pti_check_boottime_disable();
	probe_page_size_mask();
	setup_pcid();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);

	/* Init the trampoline, possibly with KASLR memory offset */
	init_trampoline();

	/*
	 * If the allocation is in bottom-up direction, we setup direct mapping
	 * in bottom-up, otherwise we setup direct mapping in top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * we need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make memory above the kernel be mapped
		 * as soon as possible. And then use page tables allocated above
		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	x86_init.hyper.init_mem_mapping();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}
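/*
 * Set up the mm and the address that text_poke() uses to patch kernel
 * text through a temporary, unshared mapping.
 */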
void __init poking_init(void)
{
	spinlock_t *ptl;
	pte_t *ptep;

	poking_mm = copy_init_mm();
	BUG_ON(!poking_mm);

	/*
	 * Randomize the poking address, but make sure that the following page
	 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
	 * and adjust the address if the PMD ends after the first one.
	 */
	poking_addr = TASK_UNMAPPED_BASE;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
			(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

	if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		poking_addr += PAGE_SIZE;

	/*
	 * We need to trigger the allocation of the page-tables that will be
	 * needed for poking now. Later, poking may be performed in an atomic
	 * section, which might cause allocation to fail.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
	BUG_ON(!ptep);
	pte_unmap_unlock(ptep, ptl);
}
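/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu,
 * and similar apps. Access has to be given to non-kernel-ram areas as well,
 * these contain the PCI mmio resources as well as potential bios/acpi data
 * regions.
 */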
int devmem_is_allowed(unsigned long pagenr)
{
	if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
				IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
			!= REGION_DISJOINT) {
		/*
		 * For disallowed memory regions in the low 1MB range,
		 * request that the page be shown as all zeros.
		 */
		if (pagenr < 256)
			return 2;

		return 0;
	}

	/*
	 * This must follow RAM test, since System RAM is considered a
	 * restricted resource under CONFIG_STRICT_DEVMEM.
	 */
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
		/* Low 1MB bypasses iomem restrictions. */
		if (pagenr < 256)
			return 1;

		return 0;
	}

	return 1;
}

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned: */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * With CONFIG_DEBUG_PAGEALLOC the init ranges are unmapped rather
	 * than handed back to the page allocator, so that any stale
	 * reference to them faults instead of silently reusing the memory.
	 */
	if (debug_pagealloc_enabled()) {
		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
			begin, end - 1);
		/*
		 * Inform kmemleak about the hole in the memory since the
		 * corresponding pages will be unmapped.
		 */
		kmemleak_free_part((void *)begin, end - begin);
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	} else {
		/*
		 * We just marked the kernel text read only above, now that
		 * we are going to free part of that, we need to make that
		 * writeable and non-executable first.
		 */
		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

		free_reserved_area((void *)begin, (void *)end,
				   POISON_FREE_INITMEM, what);
	}
}
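/*
 * begin/end can be in the direct map or the "high kernel mapping"
 * used for the kernel image only.  free_init_pages() will do the
 * right thing for either kind of address.
 */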
void free_kernel_image_pages(const char *what, void *begin, void *end)
{
	unsigned long begin_ul = (unsigned long)begin;
	unsigned long end_ul = (unsigned long)end;
	unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;

	free_init_pages(what, begin_ul, end_ul);

	/*
	 * PTI maps some of the kernel into userspace.  For performance,
	 * this includes some kernel areas that do not contain secrets.
	 * Those areas might be adjacent to the parts of the kernel image
	 * being freed, which may contain secrets.  If PTI is enabled,
	 * remove the "high kernel image mapping" for the freed areas so
	 * they are not even potentially vulnerable to Meltdown, regardless
	 * of which kernel areas PTI chooses to share.
	 *
	 * The "noalias" prevents unmapping the direct map alias, which is
	 * needed to access the freed pages.
	 *
	 * This is only valid for 64bit kernels. 32bit has only one mapping
	 * which can't be treated in this way for obvious reasons.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
		set_memory_np_noalias(begin_ul, len_pages);
}

void __ref free_initmem(void)
{
	e820__reallocate_tables();

	mem_encrypt_free_decrypted_mem();

	free_kernel_image_pages("unused kernel image (initmem)",
				&__init_begin, &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end could be not aligned, and we can not align that,
	 * as the decompressor could be confused by an aligned initrd_end.
	 * We already reserved the end partial page before in:
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * So here we can PAGE_ALIGN() safely to get the partial page freed.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif
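/*
 * Calculate how many pages below MAX_DMA_PFN are occupied (i.e. present
 * but not free) and record that via set_dma_reserve(), so the DMA zone's
 * watermarks do not count on pages that cannot be allocated.
 */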
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
	u64 nr_pages = 0, nr_free_pages = 0;
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	int i;
	u64 u;

	/*
	 * Iterate over all memory ranges (free and reserved ones alike),
	 * to calculate the total number of pages in the first 16 MB of RAM:
	 */
	nr_pages = 0;
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		start_pfn = min(start_pfn, MAX_DMA_PFN);
		end_pfn   = min(end_pfn,   MAX_DMA_PFN);

		nr_pages += end_pfn - start_pfn;
	}

	/*
	 * Iterate over free memory ranges to calculate the number of free
	 * pages in the DMA zone, while not counting potential partial
	 * pages at the beginning or the end of the range:
	 */
	nr_free_pages = 0;
	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
		start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
		end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);

		if (start_pfn < end_pfn)
			nr_free_pages += end_pfn - start_pfn;
	}

	set_dma_reserve(nr_pages - nr_free_pages);
#endif
}

void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init(max_zone_pfns);
}

__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	.loaded_mm = &init_mm,
	.next_asid = 1,
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};

void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}

#ifdef CONFIG_SWAP
unsigned long max_swapfile_size(void)
{
	unsigned long pages;

	pages = generic_max_swapfile_size();

	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
		unsigned long long l1tf_limit = l1tf_pfn_limit();
		/*
		 * We encode swap offsets also with 3 bits below those for pfn
		 * which makes the usable limit higher.
		 */
#if CONFIG_PGTABLE_LEVELS > 2
		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
		pages = min_t(unsigned long long, l1tf_limit, pages);
	}
	return pages;
}
#endif