#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is there because the whole point is to
	 * invalidate stale TLB entries and, especially if we're flushing
	 * global mappings, we don't want the compiler to reorder any
	 * subsequent memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
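
/*
 * Illustrative sketch (not part of the original header): on assemblers
 * that know the mnemonic, the .byte sequence above is equivalent to
 *
 *	asm volatile("invpcid %0, %1"
 *		     : : "m" (desc), "r" (type) : "memory");
 *
 * The hand-coded opcode is kept so that toolchains without INVPCID
 * support can still build this file.
 */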

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
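
/*
 * Illustrative usage sketch (hypothetical caller, not part of the
 * original header): dropping one user translation for a given ASID while
 * leaving global kernel mappings intact would look like
 *
 *	if (static_cpu_has(X86_FEATURE_INVPCID))
 *		invpcid_flush_one(asid, addr);
 *
 * The feature check matters: INVPCID raises #UD on CPUs that lack
 * X86_FEATURE_INVPCID.
 */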

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	u64 new_tlb_gen;

	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to
	 * order their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	smp_mb__before_atomic();
	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
	smp_mb__after_atomic();

	return new_tlb_gen;
}
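
/*
 * Illustrative sketch of the resulting protocol (hypothetical caller;
 * the real users live in arch/x86/mm/tlb.c and mm/):
 *
 *	clear_a_pte(...);			// write the paging structures
 *	info.new_tlb_gen = inc_mm_tlb_gen(mm);	// full barrier, new generation
 *	flush_tlb_others(mm_cpumask(mm), &info);// catch remote CPUs up
 *
 * Remote CPUs compare info.new_tlb_gen against their per-ASID tlb_gen
 * and flush only if they are behind.
 */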

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast.  If we don't have PCID, then switching to init_mm is
	 * quite slow, so we try to defer it in the hopes that we can
	 * avoid it entirely.  The latter approach runs the risk of
	 * receiving otherwise unnecessary IPIs.
	 *
	 * This choice is just a heuristic.  The tlb code can handle this
	 * function returning true or false regardless of whether we have
	 * PCID.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two
 * cache lines.
 */
#define TLB_NR_DYN_ASIDS 6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs.
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * tlb_gen recorded here.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
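
/*
 * Illustrative sketch (mirrors logic in arch/x86/mm/tlb.c, not part of
 * the original header): a CPU decides whether it must flush for its
 * currently loaded ASID by comparing generations:
 *
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 *	u64 local_gen = this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen);
 *	if (local_gen < info->new_tlb_gen)
 *		flush_and_update_local_gen(...);	// hypothetical helper
 */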

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's cr4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's cr4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
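
/*
 * Illustrative usage sketch (hypothetical call sites): CR4 feature bits
 * are flipped through these helpers so the per-cpu shadow stays coherent
 * with the hardware register, e.g.
 *
 *	cr4_set_bits(X86_CR4_SMEP);
 *	cr4_clear_bits(X86_CR4_PCE);
 *
 * Readers then use cr4_read_shadow() instead of touching the hardware
 * register.
 */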

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB enable
 * and PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.  This should only be used during boot on
 * the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change
	 * during a task switch and therefore we must not be preempted
	 * while we write CR3 back:
	 */
	preempt_disable();
	native_write_cr3(__native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of
		 * writes to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();

	/*
	 * Note: if we somehow had PCID but not PGE, then this wouldn't
	 * work -- we'd end up flushing kernel translations for the
	 * current ASID but we might fail to flush kernel translations
	 * for other cached ASIDs.
	 *
	 * To avoid this issue, we force PCID off if PGE is off.
	 */
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
};
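
/*
 * Illustrative sketch (hypothetical initializer, not part of the
 * original header): a full flush of a single mm would be described as
 *
 *	struct flush_tlb_info info = {
 *		.mm = mm,
 *		.start = 0,
 *		.end = TLB_FLUSH_ALL,
 *		.new_tlb_gen = inc_mm_tlb_gen(mm),
 *	};
 */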

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
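
/*
 * Illustrative sketch of the batching protocol (hypothetical caller;
 * the real one is the reclaim unmap path in mm/rmap.c):
 *
 *	for_each_mm_being_unmapped(mm)		// hypothetical iterator
 *		arch_tlbbatch_add_mm(&batch, mm);
 *	...
 *	arch_tlbbatch_flush(&batch);		// one flush round for all CPUs
 */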

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */