1#ifndef _ASM_X86_TLBFLUSH_H
2#define _ASM_X86_TLBFLUSH_H
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6
7#include <asm/processor.h>
8#include <asm/cpufeature.h>
9#include <asm/special_insns.h>
10
/*
 * Issue one INVPCID instruction of the given @type for (@pcid, @addr).
 *
 * The memory operand is a 16-byte descriptor built on the stack:
 * d[0] = PCID, d[1] = linear address (layout per the INVPCID
 * descriptor format -- confirm against the Intel SDM).
 */
static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The instruction is emitted as raw bytes (66 0F 38 82 with
	 * modrm 0x01 = memory operand at (%rcx)), presumably so it
	 * assembles even with toolchains that lack the mnemonic.
	 *
	 * "c" (&desc) supplies the descriptor address, "a" (type) the
	 * register operand.  The "m" (desc) input tells the compiler
	 * the descriptor memory is genuinely read, so its
	 * initialization above cannot be optimized away; the "memory"
	 * clobber orders the flush against surrounding accesses.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
28
/* INVPCID type operand values, passed as @type to __invpcid(). */
#define INVPCID_TYPE_INDIV_ADDR		0	/* one address, one PCID */
#define INVPCID_TYPE_SINGLE_CTXT	1	/* all addresses, one PCID */
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2	/* everything, incl. globals */
#define INVPCID_TYPE_ALL_NON_GLOBAL	3	/* everything except globals */
33
34
/* Flush one translation for @addr in the context tagged @pcid. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}
40
41
/* Flush all translations tagged with @pcid. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}
46
47
/* Flush all translations for all PCIDs, including global pages. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}
52
53
/* Flush all translations for all PCIDs, except global pages. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
58
/*
 * Under paravirt the low-level flush primitives may be hooked by the
 * hypervisor glue; otherwise they map straight onto the native
 * implementations defined below.
 */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
66
/* Per-CPU TLB and CR4 bookkeeping. */
struct tlb_state {
#ifdef CONFIG_SMP
	struct mm_struct *active_mm;	/* mm currently loaded on this CPU */
	int state;			/* lazy-TLB mode; see TLBSTATE_* below */
#endif

	/*
	 * Shadow copy of this CPU's CR4.  Kept in sync by the cr4_*
	 * accessors below so CR4 never has to be read from hardware on
	 * hot paths.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
80
81
/* Initialize this CPU's CR4 shadow from the real register. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
86
87
88static inline void cr4_set_bits(unsigned long mask)
89{
90 unsigned long cr4;
91
92 cr4 = this_cpu_read(cpu_tlbstate.cr4);
93 if ((cr4 | mask) != cr4) {
94 cr4 |= mask;
95 this_cpu_write(cpu_tlbstate.cr4, cr4);
96 __write_cr4(cr4);
97 }
98}
99
100
101static inline void cr4_clear_bits(unsigned long mask)
102{
103 unsigned long cr4;
104
105 cr4 = this_cpu_read(cpu_tlbstate.cr4);
106 if ((cr4 & ~mask) != cr4) {
107 cr4 &= ~mask;
108 this_cpu_write(cpu_tlbstate.cr4, cr4);
109 __write_cr4(cr4);
110 }
111}
112
113
/* Read the per-CPU shadow of CR4 (cheaper than reading CR4 itself). */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
118
119
120
121
122
123
124
/*
 * Boot-time CR4 state: mmu_cr4_features is the kernel's baseline CR4
 * value; trampoline_cr4_features, when non-NULL, presumably points at
 * the copy consumed by real-mode startup code -- confirm at the
 * definition site.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

/*
 * Set CR4 bits on this CPU and also record them in the boot-time
 * defaults so CPUs brought up later start with the same bits set.
 */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}
135
/*
 * Flush this CPU's TLB (non-global entries) by rewriting CR3 with its
 * current value.
 */
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}
140
/*
 * Flush everything, including global pages, by toggling CR4.PGE.
 * Caller must have interrupts disabled: an interrupt arriving between
 * the two CR4 writes would run with PGE transiently cleared.
 */
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);

	/* Clearing PGE flushes all entries, global ones included. */
	native_write_cr4(cr4 & ~X86_CR4_PGE);

	/* Restore the original CR4 value. */
	native_write_cr4(cr4);
}
151
/* Flush this CPU's TLB entirely, global pages included. */
static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * INVPCID type 2 flushes everything including globals
		 * in one instruction, with no CR4 toggling and hence
		 * no need to fiddle with interrupts.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * The CR4.PGE toggle is not atomic; disable interrupts so no
	 * handler can run between the two CR4 writes.
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}
176
/* Flush the single TLB entry mapping @addr via INVLPG. */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
181
/*
 * Flush everything: the global variant only matters (and only works)
 * when the CPU supports global pages.
 */
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}
189
/* Flush one address and account the event in vm statistics. */
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}
195
/*
 * Sentinel "flush the whole address space" value for the start/end
 * range arguments of the flush_tlb_* APIs.  Parenthesized so the
 * unary minus cannot bind unexpectedly when the macro expands inside
 * a larger expression.
 */
#define TLB_FLUSH_ALL	(-1UL)
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213#ifndef CONFIG_SMP
214
215
216
217
218
219
220
/* UP helper: flush the local TLB and account the event. */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}
226
/* UP: only one CPU, so a local full flush covers "all" CPUs. */
static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}
232
/* UP: flush the current task's TLB == flush the local TLB. */
static inline void flush_tlb(void)
{
	__flush_tlb_up();
}
237
/* UP: flush this CPU's TLB. */
static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}
242
/* UP: only flush if @mm is the one currently active on this CPU. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}
248
/* UP: flush one page of @vma, but only if its mm is active here. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}
255
/*
 * UP: range flush degenerates to a full local flush (if the vma's mm
 * is active); the start/end arguments are intentionally unused.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}
262
/*
 * UP: mm-range flush degenerates to a full local flush (if @mm is
 * active); start/end/vmflag are intentionally unused.
 */
static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}
269
/* UP: there are no other CPUs, so this is a no-op. */
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}
276
/* UP: no lazy-TLB state is tracked, so nothing to reset. */
static inline void reset_lazy_tlbstate(void)
{
}
280
/* UP: kernel-range flush degenerates to a full flush. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
286
287#else
288
289#include <asm/smp.h>
290
/* SMP: local flush is just the low-level primitive. */
#define local_flush_tlb() __flush_tlb()

/* Whole-mm flush is a full-range flush_tlb_mm_range(). */
#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

/* SMP implementations live out of line (cross-CPU IPIs involved). */
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb() flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm,
				unsigned long start, unsigned long end);

/* Values for cpu_tlbstate.state. */
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2
313
/*
 * Reset this CPU's lazy-TLB bookkeeping: state back to 0 and
 * active_mm pointing at init_mm.
 */
static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}
319
320#endif
321
322#ifndef CONFIG_PARAVIRT
323#define flush_tlb_others(mask, mm, start, end) \
324 native_flush_tlb_others(mask, mm, start, end)
325#endif
326
327#endif
328