#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"

/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as minimal supported mode;
 * WC and WT fall back to UC-.  pat_init() updates these values to support
 * more cache modes, WC and WT, when it is safe to do so.  See pat_init()
 * for details.  Note, __early_ioremap() used during early boot-time
 * adds an extra PWT bit to avoid caching issues before PAT is fully
 * configured.
 */
uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
};
EXPORT_SYMBOL(__cachemode2pte_tbl);

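/*
 * Reverse translation: index by the (PAT << 2 | PCD << 1 | PWT) bit pattern
 * of a pte (as packed by __pte2cm_idx()) to get the page_cache_mode that
 * pattern currently encodes.
 */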
uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
EXPORT_SYMBOL(__pte2cachemode_tbl);

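/*
 * Early boot-time page-table allocator state: alloc_low_pages() hands out
 * pages from the brk-reserved window [pgt_buf_start, pgt_buf_top) (in pfn
 * units) and falls back to memblock once the window is full or unusable.
 */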
static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen; see the history of
 * x86_init.mapping.pagetable_reserve for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
						__GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret;

		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_pages: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					     max_pfn_mapped << PAGE_SHIFT,
					     PAGE_SIZE * num, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_pages: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE * num);
		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
		       pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}
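
/*
 * Most callers want a single page; mm_internal.h provides an
 * alloc_low_page() wrapper for that case. A sketch of typical use when
 * populating the direct mapping (the page comes back zeroed and already
 * mapped at its __va() address):
 *
 *	pte_t *pte = alloc_low_page();
 *	... fill in the pte entries for the region ...
 *	pmd_populate_kernel(&init_mm, pmd, pte);
 */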

/*
 * By default we need three 4k pages for the initial PMD_SIZE mapping and
 * three more for 0-ISA_END_ADDRESS. With KASLR memory randomization,
 * depending on the machine's e820 layout and the PUD alignment, we may
 * need twice as many pages.
 */
#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT	6
#else
#define INIT_PGD_PAGE_COUNT	12
#endif
#define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

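/* Set once the buddy allocator is up; alloc_low_pages() then uses it. */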
int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);

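/*
 * One contiguous chunk of the physical address space, tagged with the
 * largest page sizes (PG_LEVEL_*) that may be used to map it.
 */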
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

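/* Page sizes (1 << PG_LEVEL_*) usable for the direct mapping. */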
static int page_size_mask;

static void __init probe_page_size_mask(void)
{
	/*
	 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
	 * use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() &&
	    !IS_ENABLED(CONFIG_KMEMCHECK))
		page_size_mask |= 1 << PG_LEVEL_2M;
	else
		direct_gbpages = 0;

	/* Enable PSE if available */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available */
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	} else
		__supported_pte_mask &= ~_PAGE_GLOBAL;

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

static void setup_pcid(void)
{
#ifdef CONFIG_X86_64
	if (boot_cpu_has(X86_FEATURE_PCID)) {
		if (boot_cpu_has(X86_FEATURE_PGE)) {
			/*
			 * This can't be cr4_set_bits_and_update_boot() --
			 * the trampoline code can't handle CR4.PCIDE and
			 * it wouldn't do any good anyway.  Despite the name,
			 * cr4_set_bits_and_update_boot() doesn't actually
			 * cause the bits in question to remain set all the
			 * way through the secondary boot asm.
			 *
			 * Instead, we brute-force it and set CR4.PCIDE
			 * manually in start_secondary().
			 */
			cr4_set_bits(X86_CR4_PCIDE);
		} else {
			/*
			 * flush_tlb_all(), as currently implemented, won't
			 * work if PCID is on but PGE is not.  Since that
			 * combination doesn't exist on real hardware, there's
			 * no reason to try to fully support it, but it's
			 * polite to avoid corrupting data if we're on
			 * an improperly configured VM.
			 */
			setup_clear_cpu_cap(X86_FEATURE_PCID);
		}
	}
#endif
}

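/*
 * Worst-case split of one init_memory_mapping() range: a 4k head, a 2M
 * run, a 1G run, a 2M tail and a 4k tail on 64-bit (no 1G pieces on
 * 32-bit) -- hence 5 resp. 3 map_range slots.
 */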
#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask of a small range so that it is mapped with big
 * pages after all, if the surrounding PMD/PUD-sized area is RAM too.
 */
static void __ref adjust_range_page_size_mask(struct map_range *mr,
					      int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;

	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

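/*
 * split_mem_range() carves [start, end) into at most NR_RANGE_MR pieces,
 * each aligned for the largest page size it can use. For example, on
 * 64-bit with 2M and 1G pages enabled, [4K, 3G) would come out roughly as:
 * [4K, 2M) in 4k pages, [2M, 1G) in 2M pages and [1G, 3G) in 1G pages.
 */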
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head chunk, if the start is not big-page aligned */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask &
				   ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail that is not big page (1G) aligned */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail that is not big page (2M) aligned */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge ranges that are contiguous and share a page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;

		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;

		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
			 mr[i].start, mr[i].end - 1,
			 page_size_string(&mr[i]));

	return nr_range;
}

struct range pfn_mapped[E820_MAX_ENTRIES];
int nr_pfn_mapped;

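/*
 * Record [start_pfn, end_pfn) as directly mapped: merge it into pfn_mapped[]
 * and raise the max_pfn_mapped/max_low_pfn_mapped watermarks as needed.
 */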
static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

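/* True iff every pfn in [start_pfn, end_pfn) falls inside one mapped range. */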
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __ref init_memory_mapping(unsigned long start,
					unsigned long end)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
		 start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_TYPE_RAM and E820_TYPE_RESERVED_KERN regions. We cannot
 * simply create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_range_memory_mapping() maps the memory-backed portions of
 * [r_start, r_end) and returns the number of bytes of RAM it mapped.
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * if it is overlapping with brk pgt, we need to
		 * alloc pgt buf from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				  min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * The initial mapped size is PMD_SIZE (2M); we cannot set step_size
	 * to PUD_SIZE (1G) yet. In the worst case, when we cross a 1G
	 * boundary and PG_LEVEL_2M is not set, we need 1 + 1 + 512 pages
	 * (2M + 8k) to map a 1G range with PTEs. Hence we grow by one less
	 * than the difference of the page table level shifts: with 4k
	 * pages that is a factor of 256 per call, i.e.
	 * 2M -> 512M -> 128G -> ...
	 *
	 * Don't worry about overflow in the top-down case: on 32-bit, when
	 * step_size is 0, round_down() returns 0 for start, and that turns
	 * it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 too, which the
	 * bottom-up mapping loop takes into account.
	 */
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in top-down fashion: the page tables are
 * allocated near the end of memory and the range is mapped from the
 * top down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/* Xen has a big reserved range near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the brk pgt_buf can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get the exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							     last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	/* map whatever is left, e.g. the range skipped for Xen above */
	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in bottom-up fashion: since the page tables are
 * allocated just above the kernel, the range is mapped starting right
 * after the kernel image and growing upward.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the brk pgt_buf can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end),
	 * growing step_size as more memory gets mapped (and with it, room
	 * for new page-table pages).
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}

void __init init_mem_mapping(void)
{
	unsigned long end;

	probe_page_size_mask();
	setup_pcid();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS);

	/* Init the trampoline, possibly with KASLR memory offset */
	init_trampoline();

	/*
	 * If the allocation is in bottom-up direction, we set up the direct
	 * mapping bottom-up as well, otherwise top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here, because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make memory above the kernel get
		 * mapped as soon as possible, and then use the page tables
		 * allocated above the kernel to map [ISA_END_ADDRESS,
		 * kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	hypervisor_init_mem_mapping();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu,
 * and similar apps. Since they map the entire memory range, the whole range
 * must be allowed (for mapping), but any areas that would otherwise be used
 * for kernel memory would be considered invalid (for reading, which is
 * handled by the return value of 2 below).
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (page_is_ram(pagenr)) {
		/*
		 * RAM in the legacy low 1MB area may still be mmap()ed;
		 * the return value of 2 makes reads of it show zeroes
		 * instead of the real contents. All other RAM is off
		 * limits.
		 */
		if (pagenr < 256)
			return 2;

		return 0;
	}

	/*
	 * Non-RAM pages (PCI MMIO, BIOS/ACPI data) are allowed unless the
	 * owning resource has been claimed exclusive.
	 */
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
		/* Low 1MB bypasses iomem restrictions. */
		if (pagenr < 256)
			return 1;

		return 0;
	}

	return 1;
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debug_pagealloc is enabled, the direct mapping uses 4k pages
	 * anyway (see probe_page_size_mask()), so we can simply unmap the
	 * init range to catch any late reference to it.
	 */
	if (debug_pagealloc_enabled()) {
		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
			begin, end - 1);
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	} else {
		/*
		 * We just marked the kernel text read only above, now that
		 * we are going to free part of that, we need to make that
		 * writeable and non-executable first.
		 */
		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

		free_reserved_area((void *)begin, (void *)end,
				   POISON_FREE_INITMEM, what);
	}
}

void __ref free_initmem(void)
{
	e820__reallocate_tables();

	free_init_pages("unused kernel",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may be unaligned, and we cannot align it here: the
	 * decompressor could be confused by an aligned initrd_end. The
	 * trailing partial page was already reserved in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so here we can PAGE_ALIGN() safely to free that partial page.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

/*
 * Figure out how many pages in the DMA zone (below MAX_DMA_PFN, i.e. the
 * first 16MB of RAM) are permanently reserved, and report that to the zone
 * allocator via set_dma_reserve() so its watermarks account for them.
 */
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
	u64 nr_pages = 0, nr_free_pages = 0;
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	int i;
	u64 u;

	/*
	 * Iterate over all memory ranges (free and reserved ones alike),
	 * to calculate the total number of pages in the first 16 MB of RAM:
	 */
	nr_pages = 0;
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		start_pfn = min(start_pfn, MAX_DMA_PFN);
		end_pfn   = min(end_pfn,   MAX_DMA_PFN);

		nr_pages += end_pfn - start_pfn;
	}

	/*
	 * Iterate over free memory ranges to calculate the number of free
	 * pages in the first 16 MB of RAM, taking care not to count partial
	 * pages at the range edges as free:
	 */
	nr_free_pages = 0;
	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
		start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
		end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);

		if (start_pfn < end_pfn)
			nr_free_pages += end_pfn - start_pfn;
	}

	set_dma_reserve(nr_pages - nr_free_pages);
#endif
}

void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	.loaded_mm = &init_mm,
	.next_asid = 1,
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_SYMBOL_GPL(cpu_tlbstate);

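/*
 * Called from the PAT setup code when PAT slot @entry is (re)programmed to
 * encode cache mode @cache: refresh both translation tables to match.
 */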
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}