/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#include <asm/debugreg.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT_XXL
/* Without paravirt hooks, activating an mm needs no extra work. */
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PERF_EVENTS

DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);

/*
 * CR4.PCE gates user-space RDPMC.  Set it only if this mm has asked for
 * it (perf_rdpmc_allowed) or if rdpmc is globally always available.
 */
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL

/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate and map a new LDT without invalidating the mapping of an
	 * older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

static inline void *ldt_slot_va(int slot)
{
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}
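
/*
 * Worked example, using the constants from <asm/ldt.h>: LDT_ENTRIES is
 * 8192 and LDT_ENTRY_SIZE is 8, so LDT_SLOT_STRIDE is 64 KiB and the two
 * alias slots sit back to back:
 *
 *	void *slot0 = ldt_slot_va(0);	-> LDT_BASE_ADDR
 *	void *slot1 = ldt_slot_va(1);	-> LDT_BASE_ADDR + 0x10000
 */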

/*
 * Used for LDT initialization/copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* !CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
#else
	clear_LDT();
#endif
}
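
/*
 * For reference, the writer side lives in arch/x86/kernel/ldt.c and,
 * roughly sketched, publishes a new table with a release store that the
 * READ_ONCE() above pairs with, then IPIs every CPU running this mm:
 *
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 *
 * (a sketch of install_ldt(), not a verbatim copy)
 */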

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level
	 * of performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand new, like at execve().
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, start, end);
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
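
/*
 * PKRU background for the __pkru_allows_pkey() check above: the register
 * holds two bits per key, AD (access disable) at bit 2*pkey and WD
 * (write disable) at bit 2*pkey + 1.  For example, with pkey 5 a PKRU
 * value of 0x800 sets only WD5, so reads of the VMA are allowed and the
 * check returns true only for write == false.
 */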

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}
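
/*
 * Hypothetical usage sketch (other_cr3 and the work done under it are
 * assumptions of the example): a caller that briefly runs on different
 * page tables and wants to put CR3 back without paying for a CR3 read:
 *
 *	unsigned long saved_cr3 = __get_current_cr3_fast();
 *
 *	native_write_cr3(other_cr3);
 *	... do work with the other page tables ...
 *	native_write_cr3(saved_cr3);
 */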

typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows us to set temporary mappings that are not
 * accessible by other CPUs.  Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs.  Using a temporary mm also lets us
 * avoid TLB shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm must be used exclusively by a single core.  To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();
	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used.  Userspace might set up watchpoints on addresses that are
	 * used in the temporary mm, which would lead to wrong signals being
	 * sent or crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also
	 * causes kernel breakpoints (e.g., perf's) to be disabled.  This
	 * might be undesirable, but still seems reasonable as the code that
	 * runs in the temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary
	 * mm was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}
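
/*
 * Usage sketch, modelled on the text_poke() machinery; poking_mm and the
 * write through the temporary mapping are assumptions of the example, not
 * part of this header:
 *
 *	unsigned long flags;
 *	temp_mm_state_t prev;
 *
 *	local_irq_save(flags);
 *	prev = use_temporary_mm(poking_mm);
 *	... write through the temporary mapping ...
 *	unuse_temporary_mm(prev);
 *	local_irq_restore(flags);
 */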

#endif /* _ASM_X86_MMU_CONTEXT_H */