/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#include <asm/debugreg.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif

#ifdef CONFIG_PERF_EVENTS

DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);

/*
 * Set or clear CR4.PCE so that user-space RDPMC follows this mm's
 * perf_rdpmc_allowed policy and the global rdpmc static keys.
 */
static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
{
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
	     atomic_read(&mm->context.perf_rdpmc_allowed)))
		cr4_set_bits_irqsoff(X86_CR4_PCE);
	else
		cr4_clear_bits_irqsoff(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * only an optimization -- anything can be allocated here, but page
	 * alignment is what is natively supported anyway.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, the entries array is not mapped while we are in
	 * user mode.  Instead, the whole array is aliased at the address
	 * returned by ldt_slot_va(slot).  Two slots exist so that a new LDT
	 * can be mapped into the inactive slot before the switch to it is
	 * made visible.
	 *
	 * slot is -1 if this LDT has no alias mapping.
	 */
	int			slot;
};

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

static inline void *ldt_slot_va(int slot)
{
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}
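
/*
 * Illustrative note (not part of the original header): slot 0 is aliased at
 * LDT_BASE_ADDR and slot 1 at LDT_BASE_ADDR + LDT_SLOT_STRIDE, so e.g. the
 * LDT code can map a new LDT into whichever slot the current one is *not*
 * using:
 *
 *	int new_slot = old_ldt ? !old_ldt->slot : 0;
 *	void *alias = ldt_slot_va(new_slot);
 *
 * and only then flip mm->context.ldt over to the new LDT.
 */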

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
#else
	clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking
	 * whether the LDT actually changed.  There is also no guarantee
	 * that prev->context.ldt matches the LDT currently loaded, but if
	 * the loaded LDT is non-NULL, then prev->context.ldt will also be
	 * non-NULL.
	 *
	 * The | instead of || is intentional: it generates better code for
	 * this unlikely check.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork(),
 * and on mm's that are brand-new, like at execve().
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot cacheline in the
	 * mm_struct.  That can be expensive enough to show up in munmap()
	 * profiles, even on hardware without MPX.
	 *
	 * The unlikely() and the feature check optimize for the common
	 * case: no MPX in the CPU or no MPX use in the process, in which
	 * case mpx_notify_unmap() is never called.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, start, end);
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}
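
/*
 * Illustrative sketch (not part of the original header): a caller that
 * temporarily changes CR3 can snapshot the current value cheaply and put it
 * back afterwards, roughly:
 *
 *	unsigned long saved_cr3 = __get_current_cr3_fast();
 *	... run with some other CR3 ...
 *	write_cr3(saved_cr3);
 *
 * KVM uses it in a similar way to notice when the host CR3 value it has
 * cached is stale.
 */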

typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows the kernel to set up temporary mappings that
 * are not visible to other CPUs.  Such mappings are needed to perform
 * sensitive memory writes that override the kernel memory protections
 * (e.g., W^X), without exposing the temporary page-table mappings to other
 * CPUs.  Using a temporary mm also avoids TLB shootdowns when the mapping
 * is torn down.
 *
 * Context: The temporary mm must be used exclusively by a single core, and
 *          IRQs must be disabled while it is loaded, thereby preventing
 *          interrupt handler bugs from overriding the kernel memory
 *          protections.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();
	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used.  Userspace might set up watchpoints on addresses that are
	 * used in the temporary mm, which would lead to wrong signals being
	 * sent or crashes.
	 *
	 * Note that breakpoints are not disabled selectively, so kernel
	 * breakpoints (e.g., perf's) are disabled as well.  This seems
	 * acceptable because the code that runs in the temporary mm should
	 * be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary
	 * mm was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}
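
/*
 * A minimal usage sketch (illustrative, not part of the original header);
 * this mirrors the pattern used by the kernel's text-poking code, where
 * poking_mm stands for a dedicated mm set up elsewhere:
 *
 *	unsigned long flags;
 *	temp_mm_state_t prev;
 *
 *	local_irq_save(flags);
 *	prev = use_temporary_mm(poking_mm);
 *	... write through the temporary mapping ...
 *	unuse_temporary_mm(prev);
 *	local_irq_restore(flags);
 */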

#endif /* _ASM_X86_MMU_CONTEXT_H */