/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * PowerPC Memory Protection Keys management
 *
 * Copyright 2017, Ram Pai, IBM Corporation.
 */

#ifndef _ASM_POWERPC_KEYS_H
#define _ASM_POWERPC_KEYS_H

#include <linux/jump_label.h>
#include <asm/firmware.h>

DECLARE_STATIC_KEY_TRUE(pkey_disabled);
extern int pkeys_total;			/* total pkeys as per device tree */
extern u32 initial_allocation_mask;	/* bits set for reserved keys */

#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
			    VM_PKEY_BIT3 | VM_PKEY_BIT4)

/* Override any generic PKEY permission defines */
#define PKEY_DISABLE_EXECUTE	0x4
#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS | \
				 PKEY_DISABLE_WRITE  | \
				 PKEY_DISABLE_EXECUTE)

static inline u64 pkey_to_vmflag_bits(u16 pkey)
{
	return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
}

static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
{
	if (static_branch_likely(&pkey_disabled))
		return 0x0UL;

	return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT4 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT3 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT1 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT0 : 0x0UL));
}

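/*
 * Note the bit order in the mapping above: VM_PKEY_BIT0, the least
 * significant pkey bit in vma->vm_flags (see the shift in
 * pkey_to_vmflag_bits()), selects H_PTE_PKEY_BIT4, which carries the least
 * significant bit of the key number in the Linux PTE (compare
 * pte_to_pkey_bits() below, where H_PTE_PKEY_BIT4 maps to 0x1).
 */
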
static inline int vma_pkey(struct vm_area_struct *vma)
{
	if (static_branch_likely(&pkey_disabled))
		return 0;
	return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
}

#define arch_max_pkey() pkeys_total

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return (((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL));
}

static inline u16 pte_to_pkey_bits(u64 pteflags)
{
	return (((pteflags & H_PTE_PKEY_BIT0) ? 0x10 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT1) ? 0x8 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT3) ? 0x2 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT4) ? 0x1 : 0x0UL));
}

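/*
 * Worked example for the two helpers above: a 5-bit key value of 9 (0b01001)
 * is stored in the Linux PTE as H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT4, and
 * pte_to_hpte_pkey_bits() then propagates it into the hash PTE as
 * HPTE_R_KEY_BIT1 | HPTE_R_KEY_BIT4.
 */
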
#define pkey_alloc_mask(pkey) (0x1 << pkey)

#define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map)

#define __mm_pkey_allocated(mm, pkey) {	\
	mm_pkey_allocation_map(mm) |= pkey_alloc_mask(pkey); \
}

#define __mm_pkey_free(mm, pkey) {	\
	mm_pkey_allocation_map(mm) &= ~pkey_alloc_mask(pkey); \
}

#define __mm_pkey_is_allocated(mm, pkey)	\
	(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))

#define __mm_pkey_is_reserved(pkey) (initial_allocation_mask & \
				     pkey_alloc_mask(pkey))

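/*
 * The allocation map is a simple per-mm bitmap with one bit per key:
 * pkey_alloc_mask(2) is 0x4, so __mm_pkey_allocated(mm, 2) sets bit 2 of
 * mm->context.pkey_allocation_map. Keys whose bits are set in
 * initial_allocation_mask are reserved and are never reported as allocated,
 * as mm_pkey_is_allocated() below checks.
 */
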
static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
	/* A reserved key is never considered as 'explicitly allocated' */
	return ((pkey < arch_max_pkey()) &&
		!__mm_pkey_is_reserved(pkey) &&
		__mm_pkey_is_allocated(mm, pkey));
}

extern void __arch_activate_pkey(int pkey);
extern void __arch_deactivate_pkey(int pkey);

/*
 * Returns a positive, 5-bit key on success, or -1 on failure.
 */
static inline int mm_pkey_alloc(struct mm_struct *mm)
{
	/*
	 * Note: this is the one and only place we make sure that the pkey is
	 * valid as far as the hardware is concerned. The rest of the kernel
	 * trusts that only good, valid pkeys come out of here.
	 */
	u32 all_pkeys_mask = (u32)(~(0x0));
	int ret;

	if (static_branch_likely(&pkey_disabled))
		return -1;

	/*
	 * Are we out of pkeys? We must handle this specially because ffz()
	 * behavior is undefined if there are no zeros.
	 */
	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
		return -1;

	ret = ffz((u32)mm_pkey_allocation_map(mm));
	__mm_pkey_allocated(mm, ret);

	/* Enable the key in the hardware */
	if (ret > 0)
		__arch_activate_pkey(ret);
	return ret;
}

static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
{
	if (static_branch_likely(&pkey_disabled))
		return -1;

	if (!mm_pkey_is_allocated(mm, pkey))
		return -EINVAL;

	/* Disable the key in the hardware */
	__arch_deactivate_pkey(pkey);
	__mm_pkey_free(mm, pkey);

	return 0;
}

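/*
 * Illustrative (hypothetical, not part of this header) call sequence showing
 * how the allocation helpers above fit together; the real callers are the
 * generic sys_pkey_alloc()/sys_pkey_free() paths, which serialize against
 * each other on the mm's mmap semaphore:
 *
 *	int pkey = mm_pkey_alloc(mm);
 *
 *	if (pkey > 0)
 *		arch_set_user_pkey_access(current, pkey, PKEY_DISABLE_WRITE);
 *	...
 *	mm_pkey_free(mm, pkey);
 */
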
/*
 * Try to dedicate one of the protection keys to be used as an
 * execute-only protection key.
 */
extern int __execute_only_pkey(struct mm_struct *mm);
static inline int execute_only_pkey(struct mm_struct *mm)
{
	if (static_branch_likely(&pkey_disabled))
		return -1;

	return __execute_only_pkey(mm);
}

extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
					 int prot, int pkey);
static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
					      int prot, int pkey)
{
	if (static_branch_likely(&pkey_disabled))
		return 0;

	/*
	 * Is this an mprotect_pkey() call? If so, never override the value
	 * that came from the user.
	 */
	if (pkey != -1)
		return pkey;

	return __arch_override_mprotect_pkey(vma, prot, pkey);
}

extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
				       unsigned long init_val);
static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
					    unsigned long init_val)
{
	if (static_branch_likely(&pkey_disabled))
		return -EINVAL;
	return __arch_set_user_pkey_access(tsk, pkey, init_val);
}

static inline bool arch_pkeys_enabled(void)
{
	return !static_branch_likely(&pkey_disabled);
}

extern void pkey_mm_init(struct mm_struct *mm);
extern bool arch_supports_pkeys(int cap);
extern unsigned int arch_usable_pkeys(void);
extern void thread_pkey_regs_save(struct thread_struct *thread);
extern void thread_pkey_regs_restore(struct thread_struct *new_thread,
				     struct thread_struct *old_thread);
extern void thread_pkey_regs_init(struct thread_struct *thread);
#endif /* _ASM_POWERPC_KEYS_H */