#include <linux/iova.h>

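/**
 * init_iova_domain - initialize an iova domain
 * @iovad: iova domain to initialize
 * @pfn_32bit: highest pfn reachable by 32-bit DMA; used to seed the
 *	cached-node optimization for 32-bit allocations
 */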
void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}

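/*
 * Pick the starting point for a right-to-left tree walk.  For a request
 * limited to dma_32bit_pfn, resume from the cached node and lower
 * *limit_pfn to just below the cached allocation; otherwise start from
 * the rightmost (highest) node in the tree.
 */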
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

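/* Remember the node handed out for a 32-bit-limited allocation. */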
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

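/*
 * If the iova being freed sits at or above the cached node, move the
 * cache to the node after the freed one so it never points at a stale
 * entry.
 */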
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}

/*
 * Compute the padding required below limit_pfn so that an allocation
 * of the given (power-of-two) size starts naturally aligned on its
 * size.
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
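
/*
 * Worked example: size = 4 (order 2) and limit_pfn = 0x3e give
 * pad_size = 0x3f % 4 = 3, so the allocator below picks
 * pfn_lo = 0x3e - (4 + 3) + 1 = 0x38, which is 4-aligned.
 */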
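/*
 * Walk the rbtree right to left, looking for the first gap below
 * limit_pfn large enough for size plus any alignment padding, then
 * link the new iova into the tree.  Takes the domain's rbtree lock.
 */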
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to a size-aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new iova into the domain rbtree while holding the lock */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		 * insertion. Otherwise, start from the root.
		 */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put the new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* ranges in the tree never collide */
		}

		/* Add the new node and rebalance the tree */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}
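/* Insert an iova into the rbtree keyed by pfn_lo; ranges must not collide. */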
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put the new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* ranges in the tree never collide */
	}

	/* Add the new node and rebalance the tree */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocate an iova
 * @iovad: iova domain in question
 * @size: number of page frames to allocate
 * @limit_pfn: maximum pfn that may be allocated
 * @size_aligned: set if a size-aligned address range is required
 *
 * Allocates an iova in the range IOVA_START_PFN to @limit_pfn, searching
 * downward from @limit_pfn.  If @size_aligned is set, the allocated
 * address (iova->pfn_lo) is naturally aligned on roundup_pow_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size-aligned, round the size up to the next power of two
	 * so that the padding computation keeps the start aligned.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
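/*
 * Illustrative usage sketch (not from this file; DMA_32BIT_PFN and
 * nrpages are assumed caller-side values):
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(iovad, nrpages, DMA_32BIT_PFN, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	... map pages at iova->pfn_lo through iova->pfn_hi ...
 *	__free_iova(iovad, iova);
 */
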
/**
 * find_iova - find the iova covering a given pfn
 * @iovad: iova domain in question
 * @pfn: page frame number
 *
 * Finds and returns the iova in the given domain whose range
 * contains @pfn, or NULL if there is none.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within the iova's range, return the iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller: by design, only
			 * the thread that allocated a particular iova
			 * references it (and eventually calls
			 * __free_iova()), so there is no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - free the given iova
 * @iovad: iova domain in question
 * @iova: iova to free
 *
 * Frees the given iova belonging to the given domain.
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - find and free the iova for a given pfn
 * @iovad: iova domain in question
 * @pfn: pfn that was allocated previously
 *
 * Finds the iova for the given pfn and frees it from the domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
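
/*
 * Illustrative sketch (not from this file): callers that only track
 * the bus address can free by pfn instead of by iova pointer:
 *
 *	free_iova(iovad, dma_addr >> PAGE_SHIFT);
 */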

/**
 * put_iova_domain - destroy the iova domain
 * @iovad: iova domain in question
 *
 * All iovas in the domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

/* Return 1 if [pfn_lo, pfn_hi] overlaps the iova held by @node. */
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}

/*
 * Grow an existing iova to absorb the overlapping part of the request:
 * extend it downward to *pfn_lo if needed, and if the request extends
 * past its top, advance *pfn_lo so the caller reserves only the
 * remaining range above it.
 */
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserve an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame number
 * @pfn_hi: higher page frame number
 *
 * Reserves the address range from @pfn_lo to @pfn_hi so that it is
 * never handed out by alloc_iova().
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We get here either because this is the first reserved node
	 * or because the remaining non-overlapping range still needs
	 * to be inserted.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
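
/*
 * Illustrative sketch (not from this file; IOVA_PFN() is an assumed
 * address-to-pfn helper, base/size are caller-side values): carve a
 * hole out of a freshly initialized domain so alloc_iova() never
 * hands out addresses in it:
 *
 *	reserve_iova(iovad, IOVA_PFN(base), IOVA_PFN(base + size) - 1);
 */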

/**
 * copy_reserved_iova - copy reserved iovas between domains
 * @from: source domain from which to copy
 * @to: destination domain to copy to
 *
 * Copies all reserved iovas from the @from domain to the @to domain.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}