// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

static unsigned long max_pinned_asids;
static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;

#define ASID_MASK               (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION      (1UL << asid_bits)

#define NUM_USER_ASIDS          ASID_FIRST_VERSION
#define asid2idx(asid)          ((asid) & ~ASID_MASK)
#define idx2asid(idx)           asid2idx(idx)
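
/*
 * Worked example, assuming asid_bits == 16: ASID_MASK is
 * ~GENMASK(15, 0) == ~0xffffUL, ASID_FIRST_VERSION is 0x1_0000 and
 * NUM_USER_ASIDS is 65536. A context.id of 0x3_002a therefore encodes
 * generation 3 in the upper bits and hardware ASID 0x2a in the lower
 * 16 bits (asid2idx() == 42).
 */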

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
        u32 asid;
        int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
                                                ID_AA64MMFR0_ASID_SHIFT);

        switch (fld) {
        default:
                pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
                        smp_processor_id(), fld);
                fallthrough;
        case 0:
                asid = 8;
                break;
        case 2:
                asid = 16;
        }

        return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
        u32 asid = get_cpu_asid_bits();

        if (asid < asid_bits) {
                /*
                 * We cannot decrease the ASID size at runtime, so panic if
                 * this secondary CPU supports fewer ASID bits than the boot
                 * CPU.
                 */
                pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
                                smp_processor_id(), asid, asid_bits);
                cpu_panic_kernel();
        }
}

static void set_kpti_asid_bits(unsigned long *map)
{
        unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
        /*
         * With KPTI, user and kernel run under ASIDs that differ only in
         * bit 0. Pre-set all odd-numbered bits in the map so that the
         * allocator only hands out even ASIDs to userspace, keeping the odd
         * sibling of each pair free for the kernel's trampoline mappings.
         */
        memset(map, 0xaa, len);
}
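
/*
 * Illustration: 0xaa is 0b10101010, so the memset() above sets every
 * odd-numbered bit of the bitmap. find_next_zero_bit() can then only
 * return even indices for user tasks, and the kernel's equivalent ASID
 * is obtained by setting bit 0 (see the USER_ASID_BIT handling in
 * arm64_mm_context_get() below).
 */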

static void set_reserved_asid_bits(void)
{
        if (pinned_asid_map)
                bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
        else if (arm64_kernel_unmapped_at_el0())
                set_kpti_asid_bits(asid_map);
        else
                bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

#define asid_gen_match(asid) \
        (!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
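
/*
 * Example, assuming asid_bits == 16: context.id 0x2_0031 matches
 * generation 0x2_0000, because the XOR leaves only low-order bits that
 * the shift discards. After a rollover bumps the generation to
 * 0x3_0000 the shifted XOR is non-zero, the match fails and the slow
 * path must call new_context().
 */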

static void flush_context(void)
{
        int i;
        u64 asid;

        /* Update the list of reserved ASIDs and the ASID bitmap. */
        set_reserved_asid_bits();

        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * ASID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (asid == 0)
                        asid = per_cpu(reserved_asids, i);
                __set_bit(asid2idx(asid), asid_map);
                per_cpu(reserved_asids, i) = asid;
        }

        /*
         * Queue a TLB invalidation for each CPU to perform on next
         * context-switch
         */
        cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved ASIDs looking for a match.
         * If we find one, then we can update our mm to use newasid
         * (i.e. the same ASID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old ASID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved ASID in a future
         * generation.
         */
        for_each_possible_cpu(cpu) {
                if (per_cpu(reserved_asids, cpu) == asid) {
                        hit = true;
                        per_cpu(reserved_asids, cpu) = newasid;
                }
        }

        return hit;
}

static u64 new_context(struct mm_struct *mm)
{
        static u32 cur_idx = 1;
        u64 asid = atomic64_read(&mm->context.id);
        u64 generation = atomic64_read(&asid_generation);

        if (asid != 0) {
                u64 newasid = generation | (asid & ~ASID_MASK);

                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_asid(asid, newasid))
                        return newasid;

                /*
                 * If it is pinned, we can keep using it. Note that reserved
                 * takes priority, because even if it is also pinned, we need
                 * to update the generation into the reserved_asids.
                 */
                if (refcount_read(&mm->context.pinned))
                        return newasid;

                /*
                 * We had a valid ASID in a previous life, so try to re-use
                 * it if possible.
                 */
                if (!__test_and_set_bit(asid2idx(asid), asid_map))
                        return newasid;
        }

        /*
         * Allocate a free ASID. If we can't find one, take a note of the
         * currently active ASIDs and mark the TLBs as requiring flushes. We
         * always count from ASID #2 (index 1), as we use ASID #0 when setting
         * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
         * pairs.
         */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
        if (asid != NUM_USER_ASIDS)
                goto set_asid;

        /* We're out of ASIDs, so increment the global generation count */
        generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
                                                 &asid_generation);
        flush_context();

        /* We have more ASIDs than CPUs, so this will always succeed */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
        __set_bit(asid, asid_map);
        cur_idx = asid;
        return idx2asid(asid) | generation;
}
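
/*
 * End-to-end rollover sketch, assuming asid_bits == 16: once
 * find_next_zero_bit() finds the map full, new_context() advances
 * asid_generation by ASID_FIRST_VERSION (0x1_0000), flush_context()
 * rebuilds the bitmap from the reserved and pinned ASIDs, and each CPU
 * performs a local TLB flush before it next switches context.
 */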

void check_and_switch_context(struct mm_struct *mm)
{
        unsigned long flags;
        unsigned int cpu;
        u64 asid, old_active_asid;

        if (system_supports_cnp())
                cpu_set_reserved_ttbr0();

        asid = atomic64_read(&mm->context.id);

        /*
         * The memory ordering here is subtle.
         * If our active_asids is non-zero and the ASID matches the current
         * generation, then we update the active_asids entry with a relaxed
         * cmpxchg. Racing with a concurrent rollover means that either:
         *
         * - We get a zero back from the cmpxchg and end up waiting on the
         *   lock. Taking the lock synchronises with the rollover and so
         *   we are forced to see the updated generation.
         *
         * - We get a valid ASID back from the cmpxchg, which means the
         *   relaxed xchg in flush_context will treat us as reserved
         *   because atomic RmWs are totally ordered for a given location.
         */
        old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
        if (old_active_asid && asid_gen_match(asid) &&
            atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
                                     old_active_asid, asid))
                goto switch_mm_fastpath;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
        if (!asid_gen_match(asid)) {
                asid = new_context(mm);
                atomic64_set(&mm->context.id, asid);
        }

        cpu = smp_processor_id();
        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
                local_flush_tlb_all();

        atomic64_set(this_cpu_ptr(&active_asids), asid);
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

        arm64_apply_bp_hardening();

        /*
         * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
         * emulating PAN.
         */
        if (!system_uses_ttbr0_pan())
                cpu_switch_mm(mm->pgd, mm);
}

unsigned long arm64_mm_context_get(struct mm_struct *mm)
{
        unsigned long flags;
        u64 asid;

        if (!pinned_asid_map)
                return 0;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);

        asid = atomic64_read(&mm->context.id);

        if (refcount_inc_not_zero(&mm->context.pinned))
                goto out_unlock;

        if (nr_pinned_asids >= max_pinned_asids) {
                asid = 0;
                goto out_unlock;
        }

        if (!asid_gen_match(asid)) {
                /*
                 * We went through one or more rollover since that ASID was
                 * used. Ensure that it is still valid, or generate a new one.
                 */
                asid = new_context(mm);
                atomic64_set(&mm->context.id, asid);
        }

        nr_pinned_asids++;
        __set_bit(asid2idx(asid), pinned_asid_map);
        refcount_set(&mm->context.pinned, 1);

out_unlock:
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

        asid &= ~ASID_MASK;

        /* Set the equivalent of USER_ASID_BIT */
        if (asid && arm64_kernel_unmapped_at_el0())
                asid |= 1;

        return asid;
}
EXPORT_SYMBOL_GPL(arm64_mm_context_get);

void arm64_mm_context_put(struct mm_struct *mm)
{
        unsigned long flags;
        u64 asid = atomic64_read(&mm->context.id);

        if (!pinned_asid_map)
                return;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);

        if (refcount_dec_and_test(&mm->context.pinned)) {
                __clear_bit(asid2idx(asid), pinned_asid_map);
                nr_pinned_asids--;
        }

        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
EXPORT_SYMBOL_GPL(arm64_mm_context_put);
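
/*
 * Sketch of the pinning API from a caller's perspective (hypothetical
 * driver code, e.g. an IOMMU sharing a task's page tables with a
 * device; program_device_asid() is an invented placeholder):
 *
 *      asid = arm64_mm_context_get(mm);
 *      if (!asid)
 *              return -ENOSPC;         // pin limit hit, or no pinned map
 *      program_device_asid(dev, asid); // hypothetical helper
 *      ...
 *      arm64_mm_context_put(mm);       // drop the pin when done
 */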

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
        if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
                return;

        asm(ALTERNATIVE("nop; nop; nop",
                        "ic iallu; dsb nsh; isb",
                        ARM64_WORKAROUND_CAVIUM_27456));
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
        unsigned long ttbr1 = read_sysreg(ttbr1_el1);
        unsigned long asid = ASID(mm);
        unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

        /* Skip CNP for the reserved ASID */
        if (system_supports_cnp() && asid)
                ttbr0 |= TTBR_CNP_BIT;

        /* SW PAN needs a copy of the ASID in TTBR0 for entry */
        if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
                ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

        /* Set ASID in TTBR1 since TCR.A1 is set */
        ttbr1 &= ~TTBR_ASID_MASK;
        ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

        write_sysreg(ttbr1, ttbr1_el1);
        isb();
        write_sysreg(ttbr0, ttbr0_el1);
        isb();
        post_ttbr_update_workaround();
}
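
/*
 * Layout note: TTBR_ASID_MASK covers bits 63:48 of the TTBRx_EL1
 * registers, so with asid == 0x2a, FIELD_PREP(TTBR_ASID_MASK, asid)
 * yields 0x002a << 48. The hardware takes the ASID from TTBR1_EL1
 * because TCR_EL1.A1 is set; the TTBR0_EL1 copy only matters for the
 * SW PAN entry code.
 */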

static int asids_update_limit(void)
{
        unsigned long num_available_asids = NUM_USER_ASIDS;

        if (arm64_kernel_unmapped_at_el0()) {
                num_available_asids /= 2;
                if (pinned_asid_map)
                        set_kpti_asid_bits(pinned_asid_map);
        }

        /*
         * Expect allocation after rollover to fail if we don't have at least
         * one more ASID than CPUs. ASID #0 is reserved for init_mm.
         */
        WARN_ON(num_available_asids - 1 <= num_possible_cpus());
        pr_info("ASID allocator initialised with %lu entries\n",
                num_available_asids);

        /*
         * There must always be an ASID available after rollover. Ensure
         * that, even if all CPUs have a reserved ASID and the maximum number
         * of ASIDs are pinned, there still is at least one empty slot in the
         * ASID map.
         */
        max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
        return 0;
}
arch_initcall(asids_update_limit);
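
/*
 * Worked example for the limit above, assuming asid_bits == 16 and 8
 * possible CPUs: with KPTI enabled, num_available_asids is 32768, so
 * max_pinned_asids = 32768 - 8 - 2 = 32758, which keeps at least one
 * slot free after a rollover even when every CPU holds a reserved ASID
 * and all pins are taken.
 */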

static int asids_init(void)
{
        asid_bits = get_cpu_asid_bits();
        atomic64_set(&asid_generation, ASID_FIRST_VERSION);
        asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
                           GFP_KERNEL);
        if (!asid_map)
                panic("Failed to allocate bitmap for %lu ASIDs\n",
                      NUM_USER_ASIDS);

        pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
                                  sizeof(*pinned_asid_map), GFP_KERNEL);
        nr_pinned_asids = 0;

        /*
         * We cannot call set_reserved_asid_bits() here because CPU
         * caps are not finalized yet, so it is safer to assume KPTI
         * and reserve kernel ASIDs from the beginning.
         */
        if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
                set_kpti_asid_bits(asid_map);
        return 0;
}
early_initcall(asids_init);