/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PERF_EVENTS

DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);

static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
{
        if (static_branch_unlikely(&rdpmc_always_available_key) ||
            (!static_branch_unlikely(&rdpmc_never_available_key) &&
             atomic_read(&mm->context.perf_rdpmc_allowed)))
                cr4_set_bits_irqsoff(X86_CR4_PCE);
        else
                cr4_clear_bits_irqsoff(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {}
#endif
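
/*
 * Background on the two static keys above: perf exposes a global rdpmc
 * control file (on most kernels, /sys/bus/event_source/devices/cpu/rdpmc;
 * the exact path is an assumption and may differ by version). Writing 0
 * enables rdpmc_never_available_key, writing 2 enables
 * rdpmc_always_available_key, and the default of 1 leaves both keys off,
 * so that CR4.PCE tracks the per-mm perf_rdpmc_allowed count at mm switch.
 */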

#ifdef CONFIG_MODIFY_LDT_SYSCALL

/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
        /*
         * Xen requires page-aligned LDTs with special permissions.  This is
         * needed to prevent us from installing evil descriptors such as
         * call gates.  On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct      *entries;
        unsigned int            nr_entries;

        /*
         * If PTI is in use, then the entries array is not mapped while we're
         * in user mode.  The whole array will be aliased at the address
         * given by ldt_slot_va(slot).  We use two slots so that we can
         * allocate and map a new LDT without invalidating the mapping of an
         * older, still-in-use LDT.
         *
         * slot will be -1 if this LDT doesn't have an alias mapping.
         */
        int                     slot;
};

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
        mm->context.ldt = NULL;
        init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else /* !CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
        return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
        clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
        DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif

extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mutex_init(&mm->context.lock);

        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and allocated implicitly */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
#endif
        init_new_context_ldt(mm);
        return 0;
}
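
/*
 * How the ctx_id and tlb_gen initialized above are consumed (a sketch of
 * the logic in arch/x86/mm/tlb.c, not a definition from this header): each
 * per-CPU ASID slot records the (ctx_id, tlb_gen) pair it is current for,
 * and a flush request can be skipped only when both match, roughly:
 *
 *      if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) ==
 *                      mm->context.ctx_id &&
 *          this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) ==
 *                      atomic64_read(&mm->context.tlb_gen))
 *              return;  -- already up to date, nothing to flush
 *
 * ctx_id comes from the monotonic 64-bit last_mm_ctx_id counter and is
 * never reused in practice, so a recycled mm_struct cannot be mistaken
 * for its previous incarnation.
 */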
static inline void destroy_context(struct mm_struct *mm)
{
        destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0)
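
/*
 * Note: the stray semicolon after "while (0)" was dropped above so that
 * activate_mm() composes safely inside if/else. activate_mm() is invoked
 * when an mm is first activated, e.g. from exec_mmap() during execve(),
 * which is why switch_mm() receives a NULL task here rather than a
 * scheduler-driven previous task.
 */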

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
                return;

        /* Duplicate the oldmm pkey state in mm: */
        mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
        mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        arch_dup_pkeys(oldmm, mm);
        paravirt_arch_dup_mmap(oldmm, mm);
        return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
        ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
                mm->context.ia32_compat != TIF_IA32;
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif

static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
                              unsigned long end)
{
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
        if (!current->mm)
                return true;

        /*
         * Should PKRU be enforced on the access to this VMA?  If
         * the VMA is from another process, then PKRU has no
         * relevance and should not be enforced.
         */
        if (current->mm != vma->vm_mm)
                return true;

        return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* pkeys never affect instruction fetches */
        if (execute)
                return true;

        /* allow access if the VMA is not one from this process */
        if (foreign || vma_is_foreign(vma))
                return true;
        return __pkru_allows_pkey(vma_pkey(vma), write);
}
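
/*
 * For reference, __pkru_allows_pkey() (defined in <asm/pgtable.h>, not
 * here) boils down to checking the two PKRU bits assigned to the key,
 * roughly:
 *
 *      bit = pkey * PKRU_BITS_PER_PKEY;
 *      read allowed  : !(pkru & (PKRU_AD_BIT << bit))
 *      write allowed : !(pkru & (PKRU_WD_BIT << bit))
 *
 * A write is permitted only when both the access-disable and write-disable
 * bits are clear. This is a sketch of the logic, not the authoritative
 * definition.
 */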

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
        unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
                this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || preemptible());

        VM_BUG_ON(cr3 != __read_cr3());
        return cr3;
}
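
/*
 * Assumed shape of build_cr3() (see <asm/tlbflush.h> for the real
 * definition): the physical address of the PGD, OR'd with the hardware
 * PCID derived from the software ASID when X86_FEATURE_PCID is enabled,
 * roughly:
 *
 *      cr3 = __sme_pa(pgd) | kern_pcid(asid);
 */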

typedef struct {
        struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm makes it possible to set up temporary mappings that
 * are not visible to other CPUs. Such mappings are needed to perform
 * sensitive memory writes that override the kernel memory protections
 * (e.g., W^X), without exposing the temporary page-table mappings that are
 * required for these write operations to other CPUs. Using a temporary mm
 * also avoids TLB shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm must be used by a single core exclusively. To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded; this prevents interrupt handler bugs from overriding the
 *          kernel memory protections.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
        temp_mm_state_t temp_state;

        lockdep_assert_irqs_disabled();
        temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        switch_mm_irqs_off(NULL, mm, current);

        /*
         * If breakpoints are enabled, disable them while the temporary mm
         * is used. Userspace might set up watchpoints on addresses that are
         * used in the temporary mm, which would lead to wrong signals being
         * sent or crashes.
         *
         * Note that breakpoints are not disabled selectively, which also
         * causes kernel breakpoints (e.g., perf's) to be disabled. This
         * might be undesirable, but still seems reasonable as the code that
         * runs in the temporary mm should be short.
         */
        if (hw_breakpoint_active())
                hw_breakpoint_disable();

        return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
        lockdep_assert_irqs_disabled();
        switch_mm_irqs_off(NULL, prev_state.mm, current);

        /*
         * Restore the breakpoints if they were disabled before the
         * temporary mm was loaded.
         */
        if (hw_breakpoint_active())
                hw_breakpoint_restore();
}
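
/*
 * Illustrative pairing of the two helpers above (a sketch modeled on the
 * text_poke() machinery; "patching_mm" and the write itself are
 * hypothetical placeholders, not symbols defined in this header):
 *
 *      temp_mm_state_t prev;
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      prev = use_temporary_mm(patching_mm);
 *
 *      ... perform the privileged write through the temporary mapping ...
 *
 *      unuse_temporary_mm(prev);
 *      local_irq_restore(flags);
 */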

#endif /* _ASM_X86_MMU_CONTEXT_H */