/* SPDX-style header: Book3S-64 KUP (Kernel Userspace Protection) support. */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

/*
 * AMR/IAMR bit patterns used to block kernel access to userspace.
 * Each pkey owns a 2-bit field in the (I)AMR; these constants program
 * every field at once.  NOTE(review): the exact per-key bit layout is
 * defined by the Power ISA AMR description — confirm against the ISA.
 */
#define AMR_KUAP_BLOCK_READ UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)

#ifdef __ASSEMBLY__
14
/*
 * Restore the user AMR/IAMR values saved in the stack frame when
 * returning to userspace.  Only relevant when pkeys are in use.
 * \gpr1 and \gpr2 are scratch GPRs; clobbers CR0 via cmpd.
 */
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/* No pkey support: nothing was context-switched, skip everything. */
	b	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/* Reload the user AMR value saved in this interrupt frame. */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * Without KUAP the kernel did not force AMR_KUAP_BLOCKED, so the
	 * register may already hold the user value: compare and skip the
	 * SPR write when unchanged.  With KUAP this section is patched
	 * out and the write is unconditional.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/* Same dance for IAMR (execute permission), gated on KUEP. */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100:
	/*
	 * NOTE(review): no trailing isync — presumably the interrupt
	 * return (rfid) that follows is itself context-synchronizing;
	 * confirm against the caller.
	 */
#endif
.endm
61
/*
 * Restore the AMR value saved in the stack frame when returning to
 * kernel context.  IAMR is not touched on the return-to-kernel path.
 * \gpr1 and \gpr2 are scratch; clobbers CR0 via cmpd.
 */
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * Skip the SPR write when the saved AMR already matches the live
	 * value (the common case inside the kernel).
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * NOTE(review): no trailing isync — presumably the subsequent
	 * interrupt return provides context synchronization; confirm.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
84
#ifdef CONFIG_PPC_KUAP
/*
 * Debug aid: warn (once per site) if AMR is not in the fully-blocked
 * state at a point where KUAP says it must be locked.
 * Compiled away unless CONFIG_PPC_KUAP_DEBUG is set.
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	/* Conditional trap feeds a WARN-class bug entry when values differ. */
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
/*
 * On interrupt entry: save the current AMR (and, for entries from
 * userspace, IAMR) into the stack frame and switch the registers to
 * their fully-blocked kernel values.
 *
 * \gpr1, \gpr2  - scratch GPRs
 * \use_cr       - CR field used for the local AMR comparison
 * \msr_pr_cr    - optional CR field; when given, it is presumed to have
 *                 been set by the caller from MSR[PR] so bne/beq on it
 *                 distinguishes user vs kernel entry — confirm in callers.
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)
	/* Neither pkeys nor KUAP enabled: nothing to save or lock. */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * Without pkeys the AMR is only managed by the kernel itself, so
	 * an entry from one context can skip the save/lock entirely.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	bne	\msr_pr_cr, 100f
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/* Save the live AMR into this interrupt frame. */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * With KUAP: if AMR is not already fully blocked, lock it now.
	 * isync after mtspr orders the change before subsequent accesses.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * IAMR is only saved/locked for entries from userspace (beq on
	 * \msr_pr_cr skips it for kernel entries), gated on KUEP.
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100:
#endif
.endm
193
#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

/* Enables the L1D uaccess flush calls in prevent/restore_user_access. */
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_PKEY

/* Boot-time default (I)AMR/UAMOR values, defined in arch pkey code. */
extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;

#include <asm/mmu.h>
#include <asm/ptrace.h>
208
209
210
211
212
213
214
215static inline u64 current_thread_amr(void)
216{
217 if (current->thread.regs)
218 return current->thread.regs->amr;
219 return default_amr;
220}
221
222static inline u64 current_thread_iamr(void)
223{
224 if (current->thread.regs)
225 return current->thread.regs->iamr;
226 return default_iamr;
227}
228#endif
229
230#ifdef CONFIG_PPC_KUAP
231
/*
 * Restore the user AMR/IAMR from the interrupt frame when returning to
 * userspace.  Only acts when pkeys are enabled.
 *
 * With KUAP/KUEP enabled the registers were forced to blocked values on
 * entry, so an unconditional restore is needed; without those features
 * the live value may already match the saved one, and the (expensive)
 * SPR write is skipped when it does.
 */
static inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}

	/* Single isync covers both SPR writes. */
	if (restore_amr || restore_iamr) {
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}

	/*
	 * NOTE(review): no trailing isync — presumably the interrupt
	 * return to userspace is context-synchronizing before any user
	 * access can occur; confirm against the exit path.
	 */
}
270
/*
 * Restore the AMR saved in @regs when returning to kernel context.
 * @amr is the current live AMR value (as sampled by the caller); the
 * SPR write is skipped when nothing changed.  IAMR is not restored on
 * the return-to-kernel path.
 */
static inline void kuap_kernel_restore(struct pt_regs *regs,
				       unsigned long amr)
{
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		if (unlikely(regs->amr != amr)) {
			isync();
			mtspr(SPRN_AMR, regs->amr);
			/*
			 * NOTE(review): no trailing isync — presumably
			 * the interrupt return provides the context
			 * synchronization; confirm.
			 */
		}
	}
}
289
290static inline unsigned long kuap_get_and_assert_locked(void)
291{
292 if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
293 unsigned long amr = mfspr(SPRN_AMR);
294 if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
295 WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
296 return amr;
297 }
298 return 0;
299}
300
301static inline void kuap_assert_locked(void)
302{
303 if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
304 WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
305}
306
307
308
309
310
311
/*
 * Read the current AMR state for save/restore around uaccess windows.
 *
 * When KUAP is not enabled we report AMR_KUAP_BLOCKED rather than the
 * real register: prevent_user_access_return() saves this value and
 * restore_user_access() (below) triggers the uaccess flush only when
 * the saved flags equal AMR_KUAP_BLOCKED, so this keeps the flush
 * working even without KUAP.
 */
static inline unsigned long get_kuap(void)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}
327
/*
 * Write @value to the AMR; no-op when KUAP is not enabled.
 *
 * The mtspr is bracketed by isync: per the Power ISA an AMR update
 * requires context synchronization before and after so that preceding
 * and following storage accesses see the intended permissions —
 * NOTE(review): confirm the exact requirement against the ISA.
 */
static inline void set_kuap(unsigned long value)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return;

	isync();
	mtspr(SPRN_AMR, value);
	isync();
}
341
342static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
343 bool is_write)
344{
345 if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
346 return false;
347
348
349
350
351
352
353
354
355
356
357
358
359 if (is_write) {
360 return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
361 }
362 return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
363}
364
365static __always_inline void allow_user_access(void __user *to, const void __user *from,
366 unsigned long size, unsigned long dir)
367{
368 unsigned long thread_amr = 0;
369
370
371 BUILD_BUG_ON(!__builtin_constant_p(dir));
372
373 if (mmu_has_feature(MMU_FTR_PKEY))
374 thread_amr = current_thread_amr();
375
376 if (dir == KUAP_READ)
377 set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
378 else if (dir == KUAP_WRITE)
379 set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
380 else if (dir == KUAP_READ_WRITE)
381 set_kuap(thread_amr);
382 else
383 BUILD_BUG();
384}
385
#else /* !CONFIG_PPC_KUAP */

/*
 * Stub: report AMR as fully blocked so the flush logic in
 * restore_user_access() still fires (it flushes when the saved flags
 * equal AMR_KUAP_BLOCKED).
 */
static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

/* Stub: no AMR to program without KUAP. */
static inline void set_kuap(unsigned long value) { }

/* Stub: user access is always permitted without KUAP. */
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */
400
/*
 * Close the userspace access window: block all access via AMR, then
 * perform the L1D uaccess flush when the mitigation static key is on.
 * Ordering matters: lock first, then flush.
 */
static inline void prevent_user_access(unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}
407
/*
 * Like prevent_user_access(), but first capture the current KUAP state
 * so the caller can reinstate it later with restore_user_access().
 * Returns the prior AMR flags (AMR_KUAP_BLOCKED when KUAP is off).
 */
static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}
418
/*
 * Reinstate the KUAP state captured by prevent_user_access_return().
 * If the restored state is itself fully blocked, this transition closes
 * (rather than opens) a uaccess window, so the L1D flush is performed
 * here too when the mitigation static key is on.
 */
static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
425#endif
426
427#endif
428