#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

#include "mm_internal.h"

#ifdef CONFIG_PARAVIRT
# define STATIC_NOPV
#else
# define STATIC_NOPV			static
# define __flush_tlb_local		native_flush_tlb_local
# define __flush_tlb_global		native_flush_tlb_global
# define __flush_tlb_one_user(addr)	native_flush_tlb_one_user(addr)
# define __flush_tlb_others(msk, info)	native_flush_tlb_others(msk, info)
#endif
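
/*
 * Bit 0 of cpu_tlbstate.last_user_mm_ibpb carries the TIF_SPEC_IB state of
 * the task that last ran on this CPU, mangled into the stored mm pointer by
 * mm_mangle_tif_spec_ib() below.
 */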
#define LAST_USER_MM_IBPB	0x1UL
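
/*
 * The x86 feature is called PCID (Process Context IDentifier); it fills the
 * role that ASID plays on most RISC processors.  The code below uses "ASID"
 * for the kernel's logical, zero-based context number and "PCID" for the
 * value that is actually written into the low 12 bits of CR3.
 */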
#define CR3_HW_ASID_BITS		12
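
/*
 * When PAGE_TABLE_ISOLATION is enabled, the kernel/user switch consumes one
 * PCID bit (X86_CR3_PTI_PCID_USER_BIT), halving the PCID space that is left
 * for kernel ASIDs.
 */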
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
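
/*
 * ASIDs are zero-based: 0..MAX_ASID_AVAILABLE are valid.  -1 below accounts
 * for them being zero-based; another -1 is because PCID 0 is reserved for
 * PCID-unaware code (see kern_pcid()), so one fewer hardware PCID is usable.
 */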
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
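
/*
 * Given @asid, compute the kernel PCID that is written into CR3.
 */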
static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * Make sure that the dynamic ASID space does not conflict with the
	 * bit we are using to switch between user and kernel ASIDs.
	 */
	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));

	/*
	 * The ASID being passed in here should have respected the
	 * MAX_ASID_AVAILABLE limit and thus never have the user bit set.
	 */
	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
	/*
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
	 * bits.  This keeps PCID 0 reserved, so PCID-unaware code that saves
	 * and restores CR3 cannot corrupt the TLB entries of ASID 0, and any
	 * bug that loads a PCID-enabled CR3 with CR4.PCIDE off will trigger
	 * deterministically.
	 */
	return asid + 1;
}
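
/*
 * Given @asid, compute the matching user PCID used for the PTI user
 * page tables.
 */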
static inline u16 user_pcid(u16 asid)
{
	u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
#endif
	return ret;
}

static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __sme_pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(pgd);
	}
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	/*
	 * Use boot_cpu_has() instead of this_cpu_has() as this function
	 * might be called during early boot.  This should work even after
	 * boot because all CPUs have the same capabilities.
	 */
	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
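
/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts.  We do the
 * necessary invalidation by clearing out the 'ctx_id' which
 * forces a TLB flush when the context is next loaded.
 */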
static void clear_asid_other(void)
{
	u16 asid;

	/*
	 * This is only expected to be set if we have disabled
	 * kernel _PAGE_GLOBAL pages.
	 */
	if (!static_cpu_has(X86_FEATURE_PTI)) {
		WARN_ON_ONCE(1);
		return;
	}

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		/* Do not need to flush the currently loaded ASID. */
		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
			continue;
		/*
		 * Make sure the next time we go to switch to
		 * this ASID, we do a flush.
		 */
		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
	}
	this_cpu_write(cpu_tlbstate.invalidate_other, false);
}

atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
			    u16 *new_asid, bool *need_flush)
{
	u16 asid;

	if (!static_cpu_has(X86_FEATURE_PCID)) {
		*new_asid = 0;
		*need_flush = true;
		return;
	}

	if (this_cpu_read(cpu_tlbstate.invalidate_other))
		clear_asid_other();

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
		    next->context.ctx_id)
			continue;

		*new_asid = asid;
		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
			       next_tlb_gen);
		return;
	}

	/*
	 * We don't currently own an ASID slot on this CPU.
	 * Allocate a slot.
	 */
	*new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
	if (*new_asid >= TLB_NR_DYN_ASIDS) {
		*new_asid = 0;
		this_cpu_write(cpu_tlbstate.next_asid, 1);
	}
	*need_flush = true;
}
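
/*
 * Given an ASID, flush the corresponding user ASID.  We can delay this
 * until the next time we switch to it.
 */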
static inline void invalidate_user_asid(u16 asid)
{
	/* There is no user ASID if address space separation is off. */
	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		return;

	/*
	 * We only have a single ASID if PCID is off and the CR3
	 * switch optimization is disabled.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_PCID))
		return;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	__set_bit(kern_pcid(asid),
		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}

static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
	unsigned long new_mm_cr3;

	if (need_flush) {
		invalidate_user_asid(new_asid);
		new_mm_cr3 = build_cr3(pgdir, new_asid);
	} else {
		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
	}

	/*
	 * Caution: many callers of this function expect
	 * that the CR3 write is serializing and orders TLB
	 * fills with respect to the mm_cpumask writes.
	 */
	write_cr3(new_mm_cr3);
}

void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to match the logic in switch_mm_irqs_off(), otherwise
	 * the two disagree about whether this CPU is lazy.
	 */
	if (loaded_mm == &init_mm)
		return;

	/* Warn if we're not lazy. */
	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
	unsigned long next_tif = task_thread_info(next)->flags;
	unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;

	return (unsigned long)next->mm | ibpb;
}
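
/*
 * Issue an Indirect Branch Prediction Barrier (IBPB) when switching to a
 * different user process, so that one process cannot poison the branch
 * predictor state used by the next.  Kernel threads (no mm) never need it.
 */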
static void cond_ibpb(struct task_struct *next)
{
	if (!next || !next->mm)
		return;

	/*
	 * Both the conditional and the always-IBPB mode use the mm
	 * pointer to avoid the IBPB when switching between tasks of the
	 * same process.  Using the mm pointer instead of mm->context.ctx_id
	 * opens a hypothetical hole vs. mm_struct reuse, which is more or
	 * less impossible to control by an attacker.  Aside from that it
	 * would only affect the first schedule, so the theoretically
	 * exposed data is not really interesting.
	 */
	if (static_branch_likely(&switch_mm_cond_ibpb)) {
		unsigned long prev_mm, next_mm;

		/*
		 * Conditional mode has to handle both directions: a switch
		 * from a TIF_SPEC_IB task (potential attacker) to a task
		 * without it (potential victim), and the reverse.  Issuing
		 * IBPB whenever a TIF_SPEC_IB task is scheduled in or out
		 * would cover both, but causes redundant barriers when tasks
		 * of the same process are separated only by a kernel thread.
		 * Instead, mangle the TIF_SPEC_IB bit into the mm pointer of
		 * the incoming task and compare it with the value stored in
		 * cpu_tlbstate.last_user_mm_ibpb.
		 */
		next_mm = mm_mangle_tif_spec_ib(next);
		prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);

		/*
		 * Issue IBPB only if the mm's are different and one or
		 * both have the IBPB bit set.
		 */
		if (next_mm != prev_mm &&
		    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
			indirect_branch_prediction_barrier();

		this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
	}

	if (static_branch_unlikely(&switch_mm_always_ibpb)) {
		/*
		 * Only flush when switching to a user space task with a
		 * different context than the user space task which ran
		 * last on this CPU.
		 */
		if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
			indirect_branch_prediction_barrier();
			this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
		}
	}
}

#ifdef CONFIG_PERF_EVENTS
static inline void cr4_update_pce_mm(struct mm_struct *mm)
{
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
	     atomic_read(&mm->context.perf_rdpmc_allowed)))
		cr4_set_bits_irqsoff(X86_CR4_PCE);
	else
		cr4_clear_bits_irqsoff(X86_CR4_PCE);
}

void cr4_update_pce(void *ignored)
{
	cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
}

#else
static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
	unsigned cpu = smp_processor_id();
	u64 next_tlb_gen;
	bool need_flush;
	u16 new_asid;

	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, we don't assume that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	/* We don't want flush_tlb_func_*() to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Verify that CR3 is what we think it is.  This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 */
		__flush_tlb_all();
	}
#endif
	this_cpu_write(cpu_tlbstate.is_lazy, false);

	/*
	 * The membarrier system call requires a full memory barrier and
	 * core serialization before returning to user-space, after
	 * storing to rq->curr, when changing mm.  Writing to CR3 provides
	 * that full memory barrier and core serializing instruction.
	 */
	if (real_prev == next) {
		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			   next->context.ctx_id);

		/*
		 * Even in lazy TLB mode, the CPU should stay set in the
		 * mm_cpumask.  The TLB shootdown code can figure out from
		 * cpu_tlbstate.is_lazy whether or not to send an IPI.
		 */
		if (WARN_ON_ONCE(real_prev != &init_mm &&
				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * If the CPU is not in lazy TLB mode, we are just switching
		 * from one thread in a process to another thread in the same
		 * process.  No TLB flush required.
		 */
		if (!was_lazy)
			return;

		/*
		 * Read the tlb_gen to check whether a flush is needed.
		 * If the TLB is up to date, just use it.  The barrier
		 * synchronizes with the tlb_gen increment in the TLB
		 * shootdown code.
		 */
		smp_mb();
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
		if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
		    next_tlb_gen)
			return;

		/*
		 * TLB contents went out of date while we were in lazy
		 * mode.  Fall through to the TLB switching code below.
		 */
		new_asid = prev_asid;
		need_flush = true;
	} else {
		/*
		 * Avoid user/user BTB poisoning by flushing the branch
		 * predictor when switching between processes.  This stops
		 * one process from doing Spectre-v2 attacks on another.
		 */
		cond_ibpb(tsk);

		/*
		 * Stop remote flushes for the previous mm.
		 * Skip kernel threads; we never send init_mm TLB flushing
		 * IPIs, but the bitmap manipulation can cause cache line
		 * contention.
		 */
		if (real_prev != &init_mm) {
			VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
						mm_cpumask(real_prev)));
			cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
		}

		/*
		 * Start remote flushes and then read tlb_gen.
		 */
		if (next != &init_mm)
			cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

		/* Let nmi_uaccess_okay() know that we're changing CR3. */
		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
		barrier();
	}

	if (need_flush) {
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
		load_new_mm_cr3(next->pgd, new_asid, true);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	} else {
		/* The new ASID is already up to date. */
		load_new_mm_cr3(next->pgd, new_asid, false);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
	}

	/* Make sure we write CR3 before loaded_mm. */
	barrier();

	this_cpu_write(cpu_tlbstate.loaded_mm, next);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);

	if (next != real_prev) {
		cr4_update_pce_mm(next);
		switch_ldt(real_prev, next);
	}
}
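
/*
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm.  Rather than switching to
 * init_mm immediately, we mark this CPU as lazy: the real switch is deferred
 * until it is actually needed (see switch_mm_irqs_off() and the lazy-mode
 * handling in flush_tlb_func_common()).
 */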
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
		return;

	this_cpu_write(cpu_tlbstate.is_lazy, true);
}
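
/*
 * Call this when reinitializing a CPU.  It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear; CPU hotplug can do this).
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB flush.
 */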
void initialize_tlbstate_and_flush(void)
{
	int i;
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
	unsigned long cr3 = __read_cr3();

	/* Assert that CR3 already references the right mm. */
	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

	/*
	 * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
	 * doesn't work like other CR4 bits because it can only be set from
	 * long mode.)
	 */
	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm->pgd, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}
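
/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the fills
 * are from memory writes happening in either the same or a later
 * instruction.
 */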
static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/*
	 * We have three different tlb_gen values in here.  They are:
	 *
	 * - mm_tlb_gen:     the latest generation.
	 * - local_tlb_gen:  the generation that this CPU has already caught
	 *                   up to.
	 * - f->new_tlb_gen: the generation that the requester of the flush
	 *                   wants us to catch up to.
	 */
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);

	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (unlikely(loaded_mm == &init_mm))
		return;

	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
		   loaded_mm->context.ctx_id);

	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
		/*
		 * We're in lazy mode.  We need to at least flush our
		 * paging-structure cache to avoid speculatively reading
		 * garbage into our TLB.  Since switching to init_mm is barely
		 * slower than a minimal flush, just switch to init_mm.
		 *
		 * This should be rare, with native_flush_tlb_others()
		 * skipping IPIs to lazy TLB mode CPUs.
		 */
		switch_mm_irqs_off(NULL, &init_mm, NULL);
		return;
	}

	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
		/*
		 * There's nothing to do: we're already up to date.  This can
		 * happen if two concurrent flushes happen -- the first flush
		 * to be handled can catch us all the way up, leaving no work
		 * for the second flush.
		 */
		trace_tlb_flush(reason, 0);
		return;
	}

	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

	/*
	 * If we get here, the TLB is out of date and the only question is
	 * whether to do a full or a partial flush.
	 *
	 * Do a partial flush if the requested range is bounded and two extra
	 * conditions hold:
	 *
	 * 1. f->new_tlb_gen == local_tlb_gen + 1: the flush being processed
	 *    is exactly the next generation this CPU needs, so flushing just
	 *    its range brings us fully up to date.
	 *
	 * 2. f->new_tlb_gen == mm_tlb_gen: purely an optimization -- there is
	 *    little point in a partial flush that would not leave us fully
	 *    caught up, since another flush would follow shortly anyway.
	 */
	if (f->end != TLB_FLUSH_ALL &&
	    f->new_tlb_gen == local_tlb_gen + 1 &&
	    f->new_tlb_gen == mm_tlb_gen) {
		/* Partial flush */
		unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
		unsigned long addr = f->start;

		while (addr < f->end) {
			flush_tlb_one_user(addr);
			addr += 1UL << f->stride_shift;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
		trace_tlb_flush(reason, nr_invalidate);
	} else {
		/* Full flush */
		flush_tlb_local();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		trace_tlb_flush(reason, TLB_FLUSH_ALL);
	}

	/* Both paths above have brought us up to mm_tlb_gen. */
	this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}

static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
{
	const struct flush_tlb_info *f = info;

	flush_tlb_func_common(f, true, reason);
}

static void flush_tlb_func_remote(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

static bool tlb_is_not_lazy(int cpu, void *data)
{
	return !per_cpu(cpu_tlbstate.is_lazy, cpu);
}

STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
					 const struct flush_tlb_info *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		/*
		 * UV has its own IPI mechanism (the "Broadcast Assist Unit")
		 * and filters the cpumask itself instead of letting
		 * smp_call_function_many() handle everything.
		 */
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func_remote,
					       (void *)info, 1);
		return;
	}

	/*
	 * If no page tables were freed, we can skip sending IPIs to
	 * CPUs in lazy TLB mode.  They will flush the CPU themselves
	 * at the next context switch.
	 *
	 * However, if page tables are getting freed, we need to send the
	 * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
	 * on a speculatively accessed (freed) page table.
	 */
	if (info->freed_tables)
		smp_call_function_many(cpumask, flush_tlb_func_remote,
				       (void *)info, 1);
	else
		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
				      (void *)info, 1, cpumask);
}

void flush_tlb_others(const struct cpumask *cpumask,
		      const struct flush_tlb_info *info)
{
	__flush_tlb_others(cpumask, info);
}
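
/*
 * The balance point between flushing individual pages and doing a full TLB
 * flush, in units of pages: ranges larger than this are handled with a full
 * flush instead of one invalidation per page.  See Documentation/x86/tlb.rst;
 * the value is tunable at runtime via debugfs (see the bottom of this file).
 */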
unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);

#ifdef CONFIG_DEBUG_VM
static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
#endif

static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int stride_shift, bool freed_tables,
			u64 new_tlb_gen)
{
	struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);

#ifdef CONFIG_DEBUG_VM
	/*
	 * Ensure that the following code is non-reentrant and flush_tlb_info
	 * is not overwritten.  This means no TLB flushing is initiated by
	 * interrupt handlers and machine-check exception handlers.
	 */
	BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
#endif

	info->start		= start;
	info->end		= end;
	info->mm		= mm;
	info->stride_shift	= stride_shift;
	info->freed_tables	= freed_tables;
	info->new_tlb_gen	= new_tlb_gen;

	return info;
}

static inline void put_flush_tlb_info(void)
{
#ifdef CONFIG_DEBUG_VM
	/* Complete the reentrancy prevention check. */
	barrier();
	this_cpu_dec(flush_tlb_info_idx);
#endif
}

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables)
{
	struct flush_tlb_info *info;
	u64 new_tlb_gen;
	int cpu;

	cpu = get_cpu();

	/* Should we flush just the requested range? */
	if ((end == TLB_FLUSH_ALL) ||
	    ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
		start = 0;
		end = TLB_FLUSH_ALL;
	}

	/* This is also a barrier that synchronizes with switch_mm(). */
	new_tlb_gen = inc_mm_tlb_gen(mm);

	info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
				  new_tlb_gen);

	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		lockdep_assert_irqs_enabled();
		local_irq_disable();
		flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), info);

	put_flush_tlb_info();
	put_cpu();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* Flush the range one page at a time. */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		flush_tlb_one_kernel(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as user space task's flush, a bit conservative. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info *info;

		preempt_disable();
		info = get_flush_tlb_info(NULL, start, end, 0, false, 0);

		on_each_cpu(do_kernel_range_flush, info, 1);

		put_flush_tlb_info();
		preempt_enable();
	}
}
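
/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().  It is intended for
 * code like KVM that sneakily changes CR3 and needs to restore it; use
 * with care.
 */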
unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}
EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
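
/*
 * Flush one page in the kernel mapping.
 */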
void flush_tlb_one_kernel(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);

	/*
	 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
	 * paravirt equivalent.  Even with PCID, this is sufficient: we only
	 * use PCID if we also use global PTEs for the kernel mapping, and
	 * INVLPG flushes global translations across all address spaces.
	 *
	 * If PTI is on, then the kernel is mapped with non-global PTEs, and
	 * __flush_tlb_one_user() will flush the given address for the current
	 * kernel address space and for its usermode counterpart, but it does
	 * not flush it for other address spaces.
	 */
	flush_tlb_one_user(addr);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * See above.  We need to propagate the flush to all other address
	 * spaces.  In theory, we only need to propagate it to kernelmode
	 * address spaces, but the extra bookkeeping we would need is not
	 * worth it.
	 */
	this_cpu_write(cpu_tlbstate.invalidate_other, true);
}
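
/*
 * Flush one page in the user mapping.
 */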
STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
{
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * With PTI the address must also be invalidated in the user (PTI)
	 * address space.  Without INVPCID_SINGLE, defer that by marking the
	 * user ASID as needing a flush; with INVPCID_SINGLE, flush the
	 * single address in the user PCID directly.
	 */
	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
		invalidate_user_asid(loaded_mm_asid);
	else
		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}

void flush_tlb_one_user(unsigned long addr)
{
	__flush_tlb_one_user(addr);
}
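
/*
 * Flush everything.
 */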
STATIC_NOPV void native_flush_tlb_global(void)
{
	unsigned long cr4, flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* Toggle PGE to flush all entries, including global ones. */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	/* Write the original value back, re-enabling PGE. */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}
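
/*
 * Flush the entire current user mapping.
 */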
STATIC_NOPV void native_flush_tlb_local(void)
{
	/*
	 * Preemption or interrupts must be disabled to protect the access
	 * to the per CPU variable and to prevent being preempted between
	 * read_cr3() and write_cr3().
	 */
	WARN_ON_ONCE(preemptible());

	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* Rewriting CR3 with its current value flushes all non-global entries. */
	native_write_cr3(__native_read_cr3());
}

void flush_tlb_local(void)
{
	__flush_tlb_local();
}
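
/*
 * Flush everything, including global (PGE) translations when supported.
 */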
void __flush_tlb_all(void)
{
	/* Preemption must be disabled; see native_flush_tlb_local(). */
	VM_WARN_ON_ONCE(preemptible());

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/*
		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
		 */
		flush_tlb_local();
	}
}
EXPORT_SYMBOL_GPL(__flush_tlb_all);
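
/*
 * A constant, shared flush_tlb_info describing a full flush, used by
 * arch_tlbbatch_flush() below.
 */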
static const struct flush_tlb_info full_flush_tlb_info = {
	.mm = NULL,
	.start = 0,
	.end = TLB_FLUSH_ALL,
};

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		lockdep_assert_irqs_enabled();
		local_irq_disable();
		flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);

	cpumask_clear(&batch->cpumask);

	put_cpu();
}
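
/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or
 * switching the loaded mm.  It can also be dangerous if we
 * interrupted some kernel code that was temporarily using a
 * different mm.
 */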
bool nmi_uaccess_okay(void)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	struct mm_struct *current_mm = current->mm;

	VM_WARN_ON_ONCE(!loaded_mm);

	/*
	 * The condition we want to check is
	 * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
	 * is supposed to be reasonably fast.
	 *
	 * Instead, we check the almost equivalent but somewhat conservative
	 * condition below, and we rely on the fact that switch_mm_irqs_off()
	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
	 */
	if (loaded_mm != current_mm)
		return false;

	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));

	return true;
}

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);