#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif

#ifdef CONFIG_PERF_EVENTS

DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
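
/*
 * CR4.PCE controls whether user space may execute RDPMC.  Set it for this
 * mm only when perf allows it: either rdpmc is always available, or it is
 * not globally disabled and this mm has perf_rdpmc_allowed readers.
 */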
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
	     atomic_read(&mm->context.perf_rdpmc_allowed)))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
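
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */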
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate and map a new LDT without invalidating the mapping of an
	 * older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};
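
/* This is a multiple of PAGE_SIZE. */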
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

static inline void *ldt_slot_va(int slot)
{
#ifdef CONFIG_X86_64
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
#else
	BUG();
	return (void *)fix_to_virt(FIX_HOLE);
#endif
}
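
/*
 * Used for LDT copy/destruction.
 */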
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
#else
	clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level
	 * of performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
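
/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */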
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}
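
/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */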
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
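
/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */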
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */