/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
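/*
 * mm->context.id is a 64-bit value: the low ASID_BITS hold the hardware
 * ASID, the high bits hold the allocator's "generation". An ASID is only
 * valid while its generation matches asid_generation; bumping the
 * generation (a rollover) lazily invalidates every outstanding ASID.
 */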

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

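/*
 * active_asids holds the ASID currently live on each CPU (zeroed by a
 * rollover); reserved_asids preserves, across a rollover, the ASID of
 * the task each CPU was last running; tlb_flush_pending marks CPUs
 * that must invalidate their TLBs before using a new-generation ASID.
 */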
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

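/*
 * Point TTBR0 at page tables containing only global mappings, so that
 * speculative table walks cannot fetch stale user entries while the
 * ASID and page tables are being switched.
 */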
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	cpu_set_ttbr(0, __pa(swapper_pg_dir));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

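/*
 * With CONFIG_PID_IN_CONTEXTIDR, the PID of the incoming task is
 * written to the upper bits of CONTEXTIDR on every thread switch, so
 * that external debuggers and trace hardware can identify the process.
 */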
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

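/*
 * Called on rollover, with cpu_asid_lock held: rebuild asid_map from
 * the ASIDs still live on other CPUs, remember them in reserved_asids,
 * and queue the TLB invalidation each CPU must perform before running
 * userspace with a new-generation ASID.
 */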
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			/*
			 * If this CPU has already been through a
			 * rollover, but hasn't run another task in
			 * the meantime, we must preserve its reserved
			 * ASID, as this is the only trace we have of
			 * the process it is still running.
			 */
			if (asid == 0)
				asid = per_cpu(reserved_asids, i);
			__set_bit(asid & ~ASID_MASK, asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

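/*
 * Returns non-zero if @asid was preserved in reserved_asids for some
 * CPU by the last rollover, i.e. it still tags the address space that
 * CPU was running when the rollover happened.
 */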
static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

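/*
 * Allocate a (generation | ASID) value for @mm. Called with
 * cpu_asid_lock held: either revalidate a reserved ASID under the
 * current generation, or pick a free bit from asid_map, forcing a
 * rollover when none is left.
 */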
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes. We always count from ASID #1,
		 * as we reserve ASID #0 for the reserved TTBR0 page
		 * tables used while switching.
		 */
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		}
		__set_bit(asid, asid_map);
		asid |= generation;
		cpumask_clear(mm_cpumask(mm));
	}

	return asid;
}

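/*
 * Switch @mm in on this CPU. The fastpath reuses the mm's existing
 * ASID when it belongs to the current generation and no concurrent
 * rollover has zeroed this CPU's active_asids entry; otherwise take
 * cpu_asid_lock, allocate a new ASID and drain any pending TLB flush.
 */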
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

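	/*
	 * Fastpath: the generation matches and the xchg on active_asids
	 * returns non-zero, meaning no rollover has zeroed our entry
	 * since we last ran, so the current ASID is still safe to use.
	 */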
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

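	/*
	 * A rollover may have queued TLB and branch predictor
	 * invalidation for this CPU; perform it before userspace runs
	 * with the new generation's ASID.
	 */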
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
		if (erratum_a15_798181())
			dummy_flush_tlb_a15_erratum();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}