/*
 *  linux/arch/arm/mm/context.c
 *
 *  ARM ASID allocator and mm context-switch support.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * mm->context.id is a 64-bit value: the hardware ASID lives in the
 * bottom ASID_BITS bits and a monotonically increasing generation
 * number sits above it.  The generation is bumped on every rollover
 * of the ASID space; a task whose generation no longer matches
 * asid_generation must be allocated a fresh ASID before it may run.
 *
 * The hardware ASID tags TLB (and VIVT ASID-tagged I-cache) entries.
 * With CONFIG_PID_IN_CONTEXTIDR, the PID of the running task is also
 * written into the upper bits of the CONTEXTIDR register for the
 * benefit of debug and trace tools.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to flag CPUs that are running the same
		 * ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there
 * is no need for a reserved set of tables (the active ASID tracking
 * prevents any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a rollover, but
		 * hasn't run another task in the meantime, we must
		 * preserve its reserved ASID, as this is the only trace
		 * we have of the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

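	/*
	 * ASID-tagged VIVT I-caches index entries by ASID, and those
	 * ASIDs are about to be handed out again, so the I-cache must
	 * be flushed before any of them is reused.
	 */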
	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (is_reserved_asid(asid))
			return generation | (asid & ~ASID_MASK);

		/*
		 * We had a valid ASID in a previous life, so try to
		 * re-use it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			goto bump_gen;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always allocate from ASID #1 upwards: ASID #0 is never handed
	 * out, since a zero value in active_asids/reserved_asids is used
	 * to detect that a CPU has been through a rollover.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	cur_idx = asid;

bump_gen:
	asid |= generation;
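	/*
	 * The mm is changing ASID, so TLB entries tagged with its old
	 * ASID are no longer relevant; drop the record of which CPUs
	 * the mm has previously run on.
	 */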
	cpumask_clear(mm_cpumask(mm));
	return asid;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

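	/*
	 * Fast path: if this mm's generation is still current and this
	 * CPU's active_asids slot was non-zero, the xchg re-publishes
	 * the ASID as active and we can switch without taking
	 * cpu_asid_lock.  A zero slot means a rollover has happened
	 * since this CPU last switched, so fall through to the slow path.
	 */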
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID still belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

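	/*
	 * flush_context() has queued a local TLB (and branch predictor)
	 * flush for this CPU; perform it before running with an ASID
	 * that may have been recycled.
	 */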
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}