1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/iova.h>
21
22void
23init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
24{
25 spin_lock_init(&iovad->iova_rbtree_lock);
26 iovad->rbroot = RB_ROOT;
27 iovad->cached32_node = NULL;
28 iovad->dma_32bit_pfn = pfn_32bit;
29}
30
31static struct rb_node *
32__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
33{
34 if ((*limit_pfn != iovad->dma_32bit_pfn) ||
35 (iovad->cached32_node == NULL))
36 return rb_last(&iovad->rbroot);
37 else {
38 struct rb_node *prev_node = rb_prev(iovad->cached32_node);
39 struct iova *curr_iova =
40 container_of(iovad->cached32_node, struct iova, node);
41 *limit_pfn = curr_iova->pfn_lo - 1;
42 return prev_node;
43 }
44}
45
46static void
47__cached_rbnode_insert_update(struct iova_domain *iovad,
48 unsigned long limit_pfn, struct iova *new)
49{
50 if (limit_pfn != iovad->dma_32bit_pfn)
51 return;
52 iovad->cached32_node = &new->node;
53}
54
55static void
56__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
57{
58 struct iova *cached_iova;
59 struct rb_node *curr;
60
61 if (!iovad->cached32_node)
62 return;
63 curr = iovad->cached32_node;
64 cached_iova = container_of(curr, struct iova, node);
65
66 if (free->pfn_lo >= cached_iova->pfn_lo) {
67 struct rb_node *node = rb_next(&free->node);
68 struct iova *iova = container_of(node, struct iova, node);
69
70
71 if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
72 iovad->cached32_node = node;
73 else
74 iovad->cached32_node = NULL;
75 }
76}
77
78
79
80
/*
 * Number of extra pfns to allocate above the requested @size so the
 * resulting pfn_lo lands on a 2^order boundary (assuming @size is a
 * power of two — alloc_iova() rounds it up before asking for padding).
 * A single-pfn allocation (order 0) never needs padding.
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int order = ilog2(size);

	return order ? (limit_pfn + 1) % (1 << order) : 0;
}
92
/*
 * Find a free gap of @size pfns ending at or below @limit_pfn, record it
 * in @new, and insert @new into the domain's rbtree — all under the
 * rbtree lock. The tree (ordered by pfn_lo) is walked right-to-left,
 * i.e. from high addresses downwards, so the highest fitting gap wins.
 * If @size_aligned, extra pad pfns are reserved so pfn_lo is aligned.
 * Returns 0 on success, -ENOMEM if no gap fits.
 */
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards, starting from the cached node if usable. */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;	/* original limit, for the cache update below */
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;		/* entirely above our limit */
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;	/* limit is inside this iova */
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			/* Gap between curr and limit_pfn big enough? */
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;
			/* too small: fall through and slide below curr */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	/* Ran off the left edge: check the gap below the lowest iova. */
	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to a size-aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert @new into the rbtree while still holding the lock. */
	{
		struct rb_node **entry, *parent = NULL;

		/*
		 * Optimization: start the descent at @prev (the last node
		 * visited — treated here as a pseudo-root) rather than the
		 * real root; @new always belongs in prev's subtree.
		 */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put the new node. */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* duplicate pfn_lo: cannot happen */
		}

		/* Add new node and rebalance the tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);


	return 0;
}
177
178static void
179iova_insert_rbtree(struct rb_root *root, struct iova *iova)
180{
181 struct rb_node **new = &(root->rb_node), *parent = NULL;
182
183 while (*new) {
184 struct iova *this = container_of(*new, struct iova, node);
185 parent = *new;
186
187 if (iova->pfn_lo < this->pfn_lo)
188 new = &((*new)->rb_left);
189 else if (iova->pfn_lo > this->pfn_lo)
190 new = &((*new)->rb_right);
191 else
192 BUG();
193 }
194
195 rb_link_node(&iova->node, parent, new);
196 rb_insert_color(&iova->node, root);
197}
198
199
200
201
202
203
204
205
206
207
208
209
210struct iova *
211alloc_iova(struct iova_domain *iovad, unsigned long size,
212 unsigned long limit_pfn,
213 bool size_aligned)
214{
215 struct iova *new_iova;
216 int ret;
217
218 new_iova = alloc_iova_mem();
219 if (!new_iova)
220 return NULL;
221
222
223
224
225 if (size_aligned)
226 size = __roundup_pow_of_two(size);
227
228 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
229 new_iova, size_aligned);
230
231 if (ret) {
232 free_iova_mem(new_iova);
233 return NULL;
234 }
235
236 return new_iova;
237}
238
239
240
241
242
243
244
245
246struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
247{
248 unsigned long flags;
249 struct rb_node *node;
250
251
252 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
253 node = iovad->rbroot.rb_node;
254 while (node) {
255 struct iova *iova = container_of(node, struct iova, node);
256
257
258 if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
259 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
260
261
262
263
264
265
266 return iova;
267 }
268
269 if (pfn < iova->pfn_lo)
270 node = node->rb_left;
271 else if (pfn > iova->pfn_lo)
272 node = node->rb_right;
273 }
274
275 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
276 return NULL;
277}
278
279
280
281
282
283
284
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain the iova belongs to
 * @iova: iova to free (must currently be in @iovad's rbtree)
 *
 * The cache update must run before rb_erase() so cached32_node never
 * points at an erased node; the memory is released only after the
 * rbtree lock is dropped.
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
296
297
298
299
300
301
302
303
/**
 * free_iova - finds and frees the iova containing @pfn
 * @iovad: iova domain to search
 * @pfn: any pfn inside the range to free
 *
 * No-op if no iova in @iovad covers @pfn.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (!iova)
		return;

	__free_iova(iovad, iova);
}
312
313
314
315
316
317
318void put_iova_domain(struct iova_domain *iovad)
319{
320 struct rb_node *node;
321 unsigned long flags;
322
323 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
324 node = rb_first(&iovad->rbroot);
325 while (node) {
326 struct iova *iova = container_of(node, struct iova, node);
327 rb_erase(node, &iovad->rbroot);
328 free_iova_mem(iova);
329 node = rb_first(&iovad->rbroot);
330 }
331 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
332}
333
334static int
335__is_range_overlap(struct rb_node *node,
336 unsigned long pfn_lo, unsigned long pfn_hi)
337{
338 struct iova *iova = container_of(node, struct iova, node);
339
340 if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
341 return 1;
342 return 0;
343}
344
345static inline struct iova *
346alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
347{
348 struct iova *iova;
349
350 iova = alloc_iova_mem();
351 if (iova) {
352 iova->pfn_lo = pfn_lo;
353 iova->pfn_hi = pfn_hi;
354 }
355
356 return iova;
357}
358
359static struct iova *
360__insert_new_range(struct iova_domain *iovad,
361 unsigned long pfn_lo, unsigned long pfn_hi)
362{
363 struct iova *iova;
364
365 iova = alloc_and_init_iova(pfn_lo, pfn_hi);
366 if (iova)
367 iova_insert_rbtree(&iovad->rbroot, iova);
368
369 return iova;
370}
371
/*
 * Merge the requested range [*pfn_lo, *pfn_hi] with the overlapping
 * reservation @iova.
 *
 * If the request starts below @iova, grow @iova downwards to cover it.
 * If the request extends past @iova's top, advance *pfn_lo to just above
 * @iova so the caller (reserve_iova) can handle the uncovered remainder.
 * Note the asymmetry is deliberate: @iova is never grown upwards here.
 */
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}
381
382
383
384
385
386
387
388
389
/**
 * reserve_iova - reserves an iova covering [pfn_lo, pfn_hi]
 * @iovad: iova domain to reserve in
 * @pfn_lo: lowest pfn of the range
 * @pfn_hi: highest pfn of the range
 *
 * Scans existing reservations in pfn order, merging the request into any
 * node that overlaps it (__adjust_overlap_range may consume the low part
 * of the request and bump pfn_lo). If an existing node already covers the
 * whole (possibly shrunken) request we are done; otherwise the remaining
 * uncovered range is inserted as a new node. Returns the covering/new
 * iova, or NULL on allocation failure.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			/* Request now fully contained in this node? */
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				/* past the overlapping run: stop scanning */
				break;
	}

	/*
	 * We get here either because this is the first reservation in the
	 * domain, or to insert the remaining non-overlapping part.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
422
423
424
425
426
427
428
429
430void
431copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
432{
433 unsigned long flags;
434 struct rb_node *node;
435
436 spin_lock_irqsave(&from->iova_rbtree_lock, flags);
437 for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
438 struct iova *iova = container_of(node, struct iova, node);
439 struct iova *new_iova;
440 new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
441 if (!new_iova)
442 printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
443 iova->pfn_lo, iova->pfn_lo);
444 }
445 spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
446}
447
/*
 * split_and_remove_iova - carve [pfn_lo, pfn_hi] out of @iova and remove
 * it from the domain.
 *
 * Any portion of @iova below pfn_lo and any portion above pfn_hi are
 * re-inserted into the tree as fresh nodes; @iova itself — trimmed to
 * [pfn_lo, pfn_hi] and no longer in the rbtree — is returned to the
 * caller. Returns NULL and leaves the domain untouched if allocating a
 * replacement node fails.
 */
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		/* leftover range below the carve-out */
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		/* leftover range above the carve-out */
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	/* Cache update must precede rb_erase (see __free_iova). */
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		/* next is always NULL here: it is allocated after prev */
		free_iova_mem(prev);
	return NULL;
}
488