/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;
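
/*
 * active_asids is the ASID currently loaded on each CPU; it is zeroed
 * by a rollover. reserved_asids preserves the ASID a CPU was last
 * running so the rollover cannot hand it out again. tlb_flush_pending
 * marks CPUs that must invalidate their TLB before running a task
 * from the new generation.
 */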
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
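
/*
 * Layout of mm->context.id: the low asid_bits hold the hardware ASID
 * and the bits above hold a monotonically increasing rollover
 * generation, so a context is current iff those upper bits match
 * asid_generation. For example, with asid_bits == 16, NUM_USER_ASIDS
 * is 0x10000, ASID_MASK is ~0xffffUL and successive generations are
 * 0x10000, 0x20000, and so on.
 */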

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		update_cpu_boot_status(CPU_PANIC_KERNEL);
		cpu_park_loop();
	}
}
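
/*
 * ASID rollover: reset the bitmap, re-reserve each CPU's live ASID and
 * queue a local TLB flush on every CPU. Runs under cpu_asid_lock, via
 * new_context().
 */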
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	/*
	 * Ensure the generation bump is observed before we xchg the
	 * active_asids.
	 */
	smp_wmb();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_aivivt())
		__flush_icache_all();
}
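
/*
 * If 'asid' is reserved by any CPU, move every copy of it over to
 * 'newasid' and return true so the caller can keep using it.
 */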
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}
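
/*
 * Allocate an ASID for 'mm' in the current generation, rolling the
 * generation over if the ASID space is exhausted. Runs under
 * cpu_asid_lock.
 */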
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we use ASID #0 when setting a
	 * reserved TTBR0 for the init_mm.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid | generation;
}
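
/*
 * Context-switch path: revalidate (or reallocate) the mm's ASID and
 * install its page tables, first draining any TLB flush a rollover
 * left pending on this CPU.
 */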
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle. We rely on the control
	 * dependency between the generation read and the update of
	 * active_asids to ensure that we are synchronised with a
	 * parallel rollover (i.e. this pairs with the smp_wmb() in
	 * flush_context).
	 */
	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}
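
/*
 * Boot-time setup: size the ASID space from the boot CPU and allocate
 * the allocator bitmap.
 */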
static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/* If we end up with more CPUs than ASIDs, expect things to crash */
	WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);