/*
 *  MMU context allocation for 64-bit kernels.
 *
 *  Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

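/*
 * Allocate a context id in the range [min_id, max_id] from the shared IDA.
 * ida_get_new_above() only honours a lower bound, so an id that comes back
 * above max_id is released again and -ENOMEM returned.
 */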
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}

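/*
 * Reserve a specific context id so that later allocations can never hand it
 * out. Warns if the IDA returned a different id than the one requested.
 */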
void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

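/*
 * Allocate a context id for a hash-MMU user mm. The upper bound depends on
 * whether the CPU supports 68-bit virtual addresses.
 */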
int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

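/*
 * Hash flavour of init_new_context(): allocate a context id and set up the
 * address-space limit, slice page-size state and subpage protection for the
 * new mm.
 */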
static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	index = hash__alloc_context_id();
	if (index < 0)
		return index;

	/*
	 * In the case of exec, use the default limit,
	 * otherwise inherit it from the mm we are duplicating.
	 */
	if (!mm->context.addr_limit)
		mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problem promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0)
		slice_set_user_psize(mm, mmu_virtual_psize);

	subpage_prot_init_new_context(mm);

	return index;
}

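/*
 * Radix flavour of init_new_context(): the context id is also used as the
 * hardware PID, so it is bounded by mmu_pid_bits, and the new mm's page
 * table root is published in its process table entry.
 */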
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set up the process table entry for this context: the root of the
	 * radix page table plus the tree size encoded in the RTS field.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with the subsequent update of the PID
	 * register (at which point the HW can start loading/caching the
	 * entry) and the corresponding load by the MMU from the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

	mm->context.npu_context = NULL;

	return index;
}

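/*
 * Called by the core mm code whenever a new mm is created. Dispatches to the
 * radix or hash specific initialisation and then sets up the MMU-independent
 * parts of the context.
 */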
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);

	return 0;
}

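/*
 * Return a context id to the IDA so that it can be reused.
 */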
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

#ifdef CONFIG_PPC_64K_PAGES
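/*
 * Drop the references this mm still holds on its current PTE-fragment page.
 * A page is carved into PTE_FRAG_NR fragments; the references for the
 * fragments that were never handed out are dropped here, and the page is
 * freed once its refcount reaches zero.
 */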
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#else
static inline void destroy_pagetable_page(struct mm_struct *mm)
{
	return;
}
#endif

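/*
 * Tear down the MMU context of a dying mm: invalidate its process table
 * entry (radix) or free its subpage protection state (hash), release the
 * PTE-fragment page and return the context id to the allocator.
 */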
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However, we know that at least the POWER9
		 * implementation will avoid caching an entry with an
		 * invalid RTS field, and 0 is invalid. So this will do.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	} else
		subpage_prot_free(mm);
	destroy_pagetable_page(mm);
	__destroy_context(mm->context.id);
	mm->context.id = MMU_NO_CONTEXT;
}

#ifdef CONFIG_PPC_RADIX_MMU
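/*
 * Switch the hardware PID register to the next mm's context id on a radix
 * context switch. POWER9 DD1 additionally requires an explicit ERAT flush
 * after the PID update.
 */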
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	/* POWER9 DD1 workaround: invalidate the ERAT after changing the PID */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		isync();
		mtspr(SPRN_PID, next->context.id);
		isync();
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
	} else {
		mtspr(SPRN_PID, next->context.id);
		isync();
	}
}
#endif