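// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ASID (Address Space ID) allocator.
 *
 * Hands out per-context ASIDs in generations. When a generation is
 * exhausted, the allocator rolls over: it bumps the generation counter,
 * rebuilds the bitmap from the ASIDs still live on each CPU and queues a
 * TLB flush on every CPU. The arch-specific <asm/asid.h> is expected to
 * provide struct asid_info together with the active_asid() and
 * NUM_CTXT_ASIDS() helpers used below.
 */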
#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/asid.h>

#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)

#define ASID_MASK(info) (~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info) (1UL << ((info)->bits))

#define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
#define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))

static void flush_context(struct asid_info *info)
{
	int i;
	u64 asid;

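	/* Update the list of reserved ASIDs and the ASID bitmap. */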
	bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
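		/*
		 * If this CPU has already been through a rollover, but has
		 * not run another task in the meantime, keep its reserved
		 * ASID: it is the only trace left of the process it is
		 * still running.
		 */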
		if (asid == 0)
			asid = reserved_asid(info, i);
		__set_bit(asid2idx(info, asid), info->map);
		reserved_asid(info, i) = asid;
	}

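	/*
	 * Queue a TLB flush to be performed on each CPU the next time it
	 * allocates a new ASID (see the flush_pending check in
	 * asid_new_context()).
	 */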
	cpumask_setall(&info->flush_pending);
}

static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
				       u64 newasid)
{
	int cpu;
	bool hit = false;

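	/*
	 * Iterate over the set of reserved ASIDs looking for a match. If we
	 * find one, update it to use newasid (i.e. the same ASID in the
	 * current generation). We cannot exit the loop early: every copy of
	 * the old ASID must be updated, or a reserved ASID could be missed
	 * on a future rollover.
	 */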
	for_each_possible_cpu(cpu) {
		if (reserved_asid(info, cpu) == asid) {
			hit = true;
			reserved_asid(info, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct asid_info *info, atomic64_t *pasid,
		       struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(pasid);
	u64 generation = atomic64_read(&info->generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK(info));

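		/*
		 * If our current ASID was active during a rollover, it has
		 * been carried over as a reserved ASID; keep using it
		 * (bumped to the new generation) and treat the mismatch as
		 * a false alarm.
		 */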
		if (check_update_reserved_asid(info, asid, newasid))
			return newasid;

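		/*
		 * The old ASID is from an earlier generation; try to re-use
		 * it under the new generation if its slot is still free.
		 */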
		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
			return newasid;
	}

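	/*
	 * Allocate a free ASID. Search the bitmap starting from the last
	 * allocation point; if nothing is free, bump the generation, take
	 * note of the currently active ASIDs and mark the TLBs as requiring
	 * a flush. Index 0 is never handed out, which keeps ASID #0
	 * reserved.
	 */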
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
	if (asid != NUM_CTXT_ASIDS(info))
		goto set_asid;

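	/* We're out of ASIDs, so increment the global generation count. */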
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
						  &info->generation);
	flush_context(info);

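	/* We have more ASIDs than CPUs, so this second scan always succeeds. */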
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
	__set_bit(asid, info->map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return idx2asid(info, asid) | generation;
}

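/*
 * Generate a new ASID for the context.
 *
 * @info: Pointer to the ASID allocator information
 * @pasid: Pointer to the current ASID batch allocated. It will be updated
 *	   with the new ASID batch.
 * @cpu: Current CPU ID
 * @mm: The mm the new ASID is allocated for
 */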
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	raw_spin_lock_irqsave(&info->lock, flags);
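	/* Check that our ASID belongs to the current generation. */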
	asid = atomic64_read(pasid);
	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
		asid = new_context(info, pasid, mm);
		atomic64_set(pasid, asid);
	}

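	/* Perform any TLB flush deferred for this CPU by a rollover. */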
	if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
		info->flush_cpu_ctxt_cb();

	atomic64_set(&active_asid(info, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&info->lock, flags);
}

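/*
 * Initialize the ASID allocator.
 *
 * @info: Pointer to the ASID allocator information
 * @bits: Number of bits in an ASID
 * @asid_per_ctxt: Number of ASIDs to allocate per context. ASIDs are
 *	allocated contiguously for a given context; this value should be
 *	a power of 2.
 * @flush_cpu_ctxt_cb: Callback used to flush the local context when a
 *	rollover has queued a TLB invalidation for this CPU
 *
 * Return: 0 on success, -ENOMEM if the ASID bitmap could not be allocated.
 */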
int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void))
{
	info->bits = bits;
	info->ctxt_shift = ilog2(asid_per_ctxt);
	info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
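	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is always reserved.
	 */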
	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
	info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
			    sizeof(*info->map), GFP_KERNEL);
	if (!info->map)
		return -ENOMEM;

	raw_spin_lock_init(&info->lock);

	return 0;
}