// SPDX-License-Identifier: GPL-2.0
/*
 * I/O Address Space ID allocator. There is one global IOASID space, split into
 * sets. IDs are allocated from the currently active allocator (a custom
 * registered one, or the default xarray-based one).
 */
7#include <linux/ioasid.h>
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/spinlock.h>
11#include <linux/xarray.h>
12
/*
 * One allocated IOASID together with its caller-supplied private data.
 * Entries live in the active allocator's xarray and are reclaimed via RCU.
 */
struct ioasid_data {
	ioasid_t id;		/* the allocated ID */
	struct ioasid_set *set;	/* set this ID was allocated under */
	void *private;		/* caller data; read under RCU in ioasid_find() */
	struct rcu_head rcu;	/* for deferred free via kfree_rcu() */
};
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * struct ioasid_allocator_data - bookkeeping for one registered allocator.
 *
 * - The default allocator stores IOASID entries directly in its own xarray.
 * - Custom allocators (IOASID_ALLOCATOR_CUSTOM) perform the actual ID
 *   allocation through @ops; the framework still mirrors each allocated ID
 *   in @xa so private data can be looked up.  Allocators whose ops provide
 *   the same alloc/free helpers are chained on @slist rather than getting
 *   their own instance (see ioasid_register_allocator()).
 */
struct ioasid_allocator_data {
	struct ioasid_allocator_ops *ops;
	struct list_head list;		/* anchor on allocators_list */
	struct list_head slist;		/* ops sharing the same helpers */
#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
	unsigned long flags;
	struct xarray xa;		/* ID -> struct ioasid_data */
	struct rcu_head rcu;		/* deferred free on unregister */
};
57
/* Serializes allocator registration and all alloc/free/set-data paths. */
static DEFINE_SPINLOCK(ioasid_allocator_lock);
/* All registered custom allocators, linked via ioasid_allocator_data::list. */
static LIST_HEAD(allocators_list);

/* Hooks of the built-in xarray-based allocator. */
static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
static void default_free(ioasid_t ioasid, void *opaque);

static struct ioasid_allocator_ops default_ops = {
	.alloc = default_alloc,
	.free = default_free,
};

/* Used until (and whenever) no custom allocator is registered. */
static struct ioasid_allocator_data default_allocator = {
	.ops = &default_ops,
	.flags = 0,
	.xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
};

/*
 * The allocator currently in use.  Written under ioasid_allocator_lock with
 * rcu_assign_pointer(); read locklessly under RCU in ioasid_find().
 */
static struct ioasid_allocator_data *active_allocator = &default_allocator;
76
77static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
78{
79 ioasid_t id;
80
81 if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
82 pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
83 return INVALID_IOASID;
84 }
85
86 return id;
87}
88
89static void default_free(ioasid_t ioasid, void *opaque)
90{
91 struct ioasid_data *ioasid_data;
92
93 ioasid_data = xa_erase(&default_allocator.xa, ioasid);
94 kfree_rcu(ioasid_data, rcu);
95}
96
97
98static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
99{
100 struct ioasid_allocator_data *ia_data;
101
102 ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
103 if (!ia_data)
104 return NULL;
105
106 xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
107 INIT_LIST_HEAD(&ia_data->slist);
108 ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
109 ia_data->ops = ops;
110
111
112 list_add_tail(&ops->list, &ia_data->slist);
113
114 return ia_data;
115}
116
117static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
118{
119 return (a->free == b->free) && (a->alloc == b->alloc);
120}
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
/**
 * ioasid_register_allocator - register a custom IOASID allocator
 * @ops: the custom allocator ops to be registered
 *
 * Custom allocators take precedence over the default xarray-based allocator.
 * The first allocator registered becomes the active one; subsequent ones are
 * kept on allocators_list.  Allocators that provide the same alloc/free
 * helpers as an existing registration share that registration's instance and
 * are only chained on its shared-ops list.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EEXIST if @ops is
 * already registered, -EAGAIN if the default allocator still has outstanding
 * IOASIDs that would be orphaned by switching allocators.
 */
int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *ia_data;
	struct ioasid_allocator_data *pallocator;
	int ret = 0;

	spin_lock(&ioasid_allocator_lock);

	ia_data = ioasid_alloc_allocator(ops);
	if (!ia_data) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * No custom allocator yet: activate this one, but only if the default
	 * allocator holds no outstanding IOASIDs — IDs handed out by the
	 * default allocator cannot be migrated to the custom one.
	 */
	if (list_empty(&allocators_list)) {
		WARN_ON(active_allocator != &default_allocator);
		/* Use this new allocator if default is not used */
		if (xa_empty(&active_allocator->xa)) {
			rcu_assign_pointer(active_allocator, ia_data);
			list_add_tail(&ia_data->list, &allocators_list);
			goto out_unlock;
		}
		pr_warn("Default allocator active with outstanding IOASID\n");
		ret = -EAGAIN;
		goto out_free;
	}

	/* Check if the allocator is already registered */
	list_for_each_entry(pallocator, &allocators_list, list) {
		if (pallocator->ops == ops) {
			pr_err("IOASID allocator already registered\n");
			ret = -EEXIST;
			goto out_free;
		} else if (use_same_ops(pallocator->ops, ops)) {
			/*
			 * Identical alloc/free helpers: share the existing
			 * instance.  @ops is relinked from ia_data->slist
			 * (where ioasid_alloc_allocator() put it) onto the
			 * matching allocator's shared list, then the now
			 * redundant ia_data is freed below.
			 */
			list_add_tail(&ops->list, &pallocator->slist);
			goto out_free;
		}
	}
	list_add_tail(&ia_data->list, &allocators_list);

	spin_unlock(&ioasid_allocator_lock);
	return 0;
out_free:
	kfree(ia_data);
out_unlock:
	spin_unlock(&ioasid_allocator_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(ioasid_register_allocator);
197
198
199
200
201
202
203
204
205
/**
 * ioasid_unregister_allocator - remove a custom IOASID allocator
 * @ops: the ops previously passed to ioasid_register_allocator()
 *
 * If @ops is the last user of its allocator instance, the instance is torn
 * down; should it also be the active allocator, the next registered one (or
 * the default allocator, if none remain) takes over.  If other ops share the
 * instance, only @ops is unlinked and outstanding IOASIDs are kept.
 */
void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *pallocator;
	struct ioasid_allocator_ops *sops;

	spin_lock(&ioasid_allocator_lock);
	if (list_empty(&allocators_list)) {
		pr_warn("No custom IOASID allocators active!\n");
		goto exit_unlock;
	}

	list_for_each_entry(pallocator, &allocators_list, list) {
		if (!use_same_ops(pallocator->ops, ops))
			continue;

		if (list_is_singular(&pallocator->slist)) {
			/* No shared helper functions */
			list_del(&pallocator->list);
			/*
			 * All IOASIDs should have been freed before the last
			 * allocator that shares the same ops is unregistered
			 * (otherwise their tracking data is lost).
			 */
			WARN_ON(!xa_empty(&pallocator->xa));
			if (list_empty(&allocators_list)) {
				pr_info("No custom IOASID allocators, switch to default.\n");
				rcu_assign_pointer(active_allocator, &default_allocator);
			} else if (pallocator == active_allocator) {
				/* Fall back to the next registered allocator. */
				rcu_assign_pointer(active_allocator,
						   list_first_entry(&allocators_list,
								    struct ioasid_allocator_data, list));
				pr_info("IOASID allocator changed");
			}
			/* RCU-deferred: ioasid_find() may still hold a reference. */
			kfree_rcu(pallocator, rcu);
			break;
		}

		/*
		 * @ops shares the instance with others: unlink only the
		 * matching entry from the shared list, keep the allocator
		 * (and its outstanding IOASIDs) alive.
		 */
		list_for_each_entry(sops, &pallocator->slist, list) {
			if (sops == ops) {
				list_del(&ops->list);
				break;
			}
		}
		break;
	}

exit_unlock:
	spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
259
260
261
262
263
264
265
266
267
268int ioasid_set_data(ioasid_t ioasid, void *data)
269{
270 struct ioasid_data *ioasid_data;
271 int ret = 0;
272
273 spin_lock(&ioasid_allocator_lock);
274 ioasid_data = xa_load(&active_allocator->xa, ioasid);
275 if (ioasid_data)
276 rcu_assign_pointer(ioasid_data->private, data);
277 else
278 ret = -ENOENT;
279 spin_unlock(&ioasid_allocator_lock);
280
281
282
283
284
285 if (!ret)
286 synchronize_rcu();
287
288 return ret;
289}
290EXPORT_SYMBOL_GPL(ioasid_set_data);
291
292
293
294
295
296
297
298
299
300
301
302
303
304ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
305 void *private)
306{
307 struct ioasid_data *data;
308 void *adata;
309 ioasid_t id;
310
311 data = kzalloc(sizeof(*data), GFP_ATOMIC);
312 if (!data)
313 return INVALID_IOASID;
314
315 data->set = set;
316 data->private = private;
317
318
319
320
321
322 spin_lock(&ioasid_allocator_lock);
323 adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
324 id = active_allocator->ops->alloc(min, max, adata);
325 if (id == INVALID_IOASID) {
326 pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
327 goto exit_free;
328 }
329
330 if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
331 xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
332
333 pr_err("Failed to alloc ioasid from %d\n", id);
334 active_allocator->ops->free(id, active_allocator->ops->pdata);
335 goto exit_free;
336 }
337 data->id = id;
338
339 spin_unlock(&ioasid_allocator_lock);
340 return id;
341exit_free:
342 spin_unlock(&ioasid_allocator_lock);
343 kfree(data);
344 return INVALID_IOASID;
345}
346EXPORT_SYMBOL_GPL(ioasid_alloc);
347
348
349
350
351
352void ioasid_free(ioasid_t ioasid)
353{
354 struct ioasid_data *ioasid_data;
355
356 spin_lock(&ioasid_allocator_lock);
357 ioasid_data = xa_load(&active_allocator->xa, ioasid);
358 if (!ioasid_data) {
359 pr_err("Trying to free unknown IOASID %u\n", ioasid);
360 goto exit_unlock;
361 }
362
363 active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
364
365 if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
366 ioasid_data = xa_erase(&active_allocator->xa, ioasid);
367 kfree_rcu(ioasid_data, rcu);
368 }
369
370exit_unlock:
371 spin_unlock(&ioasid_allocator_lock);
372}
373EXPORT_SYMBOL_GPL(ioasid_free);
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
390 bool (*getter)(void *))
391{
392 void *priv;
393 struct ioasid_data *ioasid_data;
394 struct ioasid_allocator_data *idata;
395
396 rcu_read_lock();
397 idata = rcu_dereference(active_allocator);
398 ioasid_data = xa_load(&idata->xa, ioasid);
399 if (!ioasid_data) {
400 priv = ERR_PTR(-ENOENT);
401 goto unlock;
402 }
403 if (set && ioasid_data->set != set) {
404
405 priv = ERR_PTR(-EACCES);
406 goto unlock;
407 }
408
409 priv = rcu_dereference(ioasid_data->private);
410 if (getter && !getter(priv))
411 priv = NULL;
412unlock:
413 rcu_read_unlock();
414
415 return priv;
416}
417EXPORT_SYMBOL_GPL(ioasid_find);
418
419MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
420MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
421MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
422MODULE_LICENSE("GPL");
423