1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45#include <drm/drmP.h>
46#include <drm/drm_mm.h>
47#include <linux/slab.h>
48#include <linux/seq_file.h>
49#include <linux/export.h>
50#include <linux/interval_tree_generic.h>
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

/*
 * Record the current callstack into @node so that, on takedown, any node
 * still linked into the mm can report who inserted it (see show_leaks()).
 */
static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1 /* skip save_stack() itself */
	};

	save_stack_trace(&trace);
	/* save_stack_trace() may terminate the trace with ULONG_MAX; trim it */
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* GFP_NOWAIT: must not sleep here (may be called in atomic context) */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

/*
 * Walk every node still linked into @mm and print the callstack recorded
 * by save_stack() at insertion time; called from drm_mm_takedown() to
 * identify leaked nodes.
 */
static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		/* Nodes inserted before stack depot was ready have no stack */
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
/* No-op stubs when CONFIG_DRM_DEBUG_MM is disabled. */
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif
161
/* Inclusive interval [START, LAST] occupied by a node in the address space. */
#define START(node) ((node)->start)
#define LAST(node) ((node)->start + (node)->size - 1)

/* Instantiate drm_mm_interval_tree_*() helpers over drm_mm_node.rb. */
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)
168
169struct drm_mm_node *
170__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
171{
172 return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
173 start, last) ?: (struct drm_mm_node *)&mm->head_node;
174}
175EXPORT_SYMBOL(__drm_mm_interval_first);
176
/*
 * Insert @node into the interval tree, using @hole_node (the node @node
 * was just linked after in the address-ordered node_list) as a hint to
 * avoid a full top-down descent when possible.
 */
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		/*
		 * @node immediately follows @hole_node in address order,
		 * so it can be linked beneath hole_node's right child.
		 * First propagate the augmented __subtree_last value up
		 * towards the root, stopping once an ancestor already
		 * covers it.
		 */
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		/* No hint available (head_node): descend from the root. */
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	/* Ordered descent by start address, updating __subtree_last. */
	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}
220
/*
 * RB_INSERT - link the local variable 'node' into an rb-tree.
 * @root: the rb_root to insert into
 * @member: the rb_node member of drm_mm_node used for this tree
 * @expr: macro mapping a drm_mm_node to its u64 sort key
 *
 * NOTE: deliberately captures a variable named 'node' from the expansion
 * site (see add_hole()); not a general-purpose macro.
 */
#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

/* Sort keys for the two hole trees: hole size and hole start address. */
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
237
/*
 * Register the hole following @node in all three lookup structures:
 * the size-sorted tree, the address-sorted tree and the LRU hole_stack.
 */
static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}
251
252static void rm_hole(struct drm_mm_node *node)
253{
254 DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
255
256 list_del(&node->hole_stack);
257 rb_erase(&node->rb_hole_size, &node->mm->holes_size);
258 rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
259 node->hole_size = 0;
260
261 DRM_MM_BUG_ON(drm_mm_hole_follows(node));
262}
263
/* Map an rb_node in the size tree back to its drm_mm_node (NULL-safe). */
static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

/* Map an rb_node in the address tree back to its drm_mm_node (NULL-safe). */
static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

/* Hole size keyed by @rb in the size tree; @rb must not be NULL. */
static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}
278
279static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
280{
281 struct rb_node *best = NULL;
282 struct rb_node **link = &mm->holes_size.rb_node;
283
284 while (*link) {
285 struct rb_node *rb = *link;
286
287 if (size <= rb_hole_size(rb)) {
288 link = &rb->rb_left;
289 best = rb;
290 } else {
291 link = &rb->rb_right;
292 }
293 }
294
295 return rb_hole_size_to_node(best);
296}
297
298static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
299{
300 struct drm_mm_node *node = NULL;
301 struct rb_node **link = &mm->holes_addr.rb_node;
302
303 while (*link) {
304 u64 hole_start;
305
306 node = rb_hole_addr_to_node(*link);
307 hole_start = __drm_mm_hole_node_start(node);
308
309 if (addr < hole_start)
310 link = &node->rb_hole_addr.rb_left;
311 else if (addr > hole_start + node->hole_size)
312 link = &node->rb_hole_addr.rb_right;
313 else
314 break;
315 }
316
317 return node;
318}
319
320static struct drm_mm_node *
321first_hole(struct drm_mm *mm,
322 u64 start, u64 end, u64 size,
323 enum drm_mm_insert_mode mode)
324{
325 if (RB_EMPTY_ROOT(&mm->holes_size))
326 return NULL;
327
328 switch (mode) {
329 default:
330 case DRM_MM_INSERT_BEST:
331 return best_hole(mm, size);
332
333 case DRM_MM_INSERT_LOW:
334 return find_hole(mm, start);
335
336 case DRM_MM_INSERT_HIGH:
337 return find_hole(mm, end);
338
339 case DRM_MM_INSERT_EVICT:
340 return list_first_entry_or_null(&mm->hole_stack,
341 struct drm_mm_node,
342 hole_stack);
343 }
344}
345
346static struct drm_mm_node *
347next_hole(struct drm_mm *mm,
348 struct drm_mm_node *node,
349 enum drm_mm_insert_mode mode)
350{
351 switch (mode) {
352 default:
353 case DRM_MM_INSERT_BEST:
354 return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
355
356 case DRM_MM_INSERT_LOW:
357 return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
358
359 case DRM_MM_INSERT_HIGH:
360 return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
361
362 case DRM_MM_INSERT_EVICT:
363 node = list_next_entry(node, hole_stack);
364 return &node->hole_stack == &mm->hole_stack ? NULL : node;
365 }
366}
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
383{
384 u64 end = node->start + node->size;
385 struct drm_mm_node *hole;
386 u64 hole_start, hole_end;
387 u64 adj_start, adj_end;
388
389 end = node->start + node->size;
390 if (unlikely(end <= node->start))
391 return -ENOSPC;
392
393
394 hole = find_hole(mm, node->start);
395 if (!hole)
396 return -ENOSPC;
397
398 adj_start = hole_start = __drm_mm_hole_node_start(hole);
399 adj_end = hole_end = hole_start + hole->hole_size;
400
401 if (mm->color_adjust)
402 mm->color_adjust(hole, node->color, &adj_start, &adj_end);
403
404 if (adj_start > node->start || adj_end < end)
405 return -ENOSPC;
406
407 node->mm = mm;
408
409 list_add(&node->node_list, &hole->node_list);
410 drm_mm_interval_tree_add_node(hole, node);
411 node->allocated = true;
412 node->hole_size = 0;
413
414 rm_hole(hole);
415 if (node->start > hole_start)
416 add_hole(hole);
417 if (end < hole_end)
418 add_hole(node);
419
420 save_stack(node);
421 return 0;
422}
423EXPORT_SYMBOL(drm_mm_reserve_node);
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
/**
 * drm_mm_insert_node_in_range - ranged search for space and allocate @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert (start/size/color set on success)
 * @size: size of the allocation, must be non-zero
 * @alignment: required alignment of the start address (0 or 1 means none)
 * @color: opaque tag value passed to mm->color_adjust
 * @range_start: lowest acceptable start address
 * @range_end: highest acceptable end address
 * @mode: hole search and placement strategy, see enum drm_mm_insert_mode
 *
 * Returns:
 * 0 on success, -ENOSPC if no suitable hole was found.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;

	DRM_MM_BUG_ON(range_start >= range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	/* An alignment of 1 is a no-op; normalise it to "no alignment". */
	if (alignment <= 1)
		alignment = 0;

	/* Power-of-two alignments use a mask instead of a 64b division. */
	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
	     hole = next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		/*
		 * The LOW/HIGH walks are monotonic in address, so once a
		 * hole falls outside the requested range we can stop.
		 */
		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		/* Clip the (color-adjusted) hole to the requested range. */
		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		/* Top-down: place the node at the end of the hole. */
		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				/* Round down for top-down, up otherwise. */
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				/* Re-check the fit after realignment. */
				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		/* Re-register any space left at either end of the hole. */
		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
532
533
534
535
536
537
538
539
540
/**
 * drm_mm_remove_node - remove an allocated node from the allocator
 * @node: drm_mm_node to remove
 *
 * @node must be allocated and must not be part of an active eviction
 * scan.  Afterwards the node may be reused or freed by the caller.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	/* Drop the hole that trailed @node, if any. */
	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	/*
	 * The space freed by @node merges with any hole already following
	 * @prev_node; re-register the combined hole with its new size.
	 */
	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);
563
564
565
566
567
568
569
570
571
572
/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * @new takes over the exact range (and trailing hole, if any) of @old,
 * which is left unallocated.  No search or hole manipulation occurs.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	/* Copy start/size/color and the embedded list/rb members. */
	*new = *old;

	/* Swap @new into every structure @old was linked into. */
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node(&old->rb_hole_size,
				&new->rb_hole_size,
				&old->mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&old->mm->holes_addr);
	}

	old->allocated = false;
	new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
648 struct drm_mm *mm,
649 u64 size,
650 u64 alignment,
651 unsigned long color,
652 u64 start,
653 u64 end,
654 enum drm_mm_insert_mode mode)
655{
656 DRM_MM_BUG_ON(start >= end);
657 DRM_MM_BUG_ON(!size || size > end - start);
658 DRM_MM_BUG_ON(mm->scan_active);
659
660 scan->mm = mm;
661
662 if (alignment <= 1)
663 alignment = 0;
664
665 scan->color = color;
666 scan->alignment = alignment;
667 scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
668 scan->size = size;
669 scan->mode = mode;
670
671 DRM_MM_BUG_ON(end <= start);
672 scan->range_start = start;
673 scan->range_end = end;
674
675 scan->hit_start = U64_MAX;
676 scan->hit_end = 0;
677}
678EXPORT_SYMBOL(drm_mm_scan_init_with_range);
679
680
681
682
683
684
685
686
687
688
689
690
/**
 * drm_mm_scan_add_block - add a node to the eviction scan
 * @scan: the active drm_mm scan
 * @node: drm_mm_node to add
 *
 * Temporarily unlinks @node from the allocator's node_list (it must be
 * restored with drm_mm_scan_remove_block() before any other mm
 * operation) and checks whether the enlarged hole now satisfies the
 * scan request.
 *
 * Returns:
 * True if a suitable hole was found (recorded in scan->hit_start/end).
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/*
	 * Unlink only from the node_list, so that the hole after the
	 * predecessor appears to extend over @node; node's own list
	 * pointers are left intact so drm_mm_scan_remove_block() can
	 * relink it.  The rb-trees and hole structures are untouched.
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	/* Clip the (color-adjusted) hole to the scan range. */
	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	/* Top-down: place the allocation at the end of the hole. */
	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			/* Round down for top-down, up otherwise. */
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			/* Re-check the fit after realignment. */
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
/**
 * drm_mm_scan_remove_block - remove a node from the eviction scan
 * @scan: the active drm_mm scan
 * @node: drm_mm_node to remove
 *
 * Restores @node into the allocator's node_list and reports whether it
 * overlaps the hole found by the scan, i.e. whether it actually needs
 * to be evicted to make room.
 *
 * Returns:
 * True if @node overlaps [scan->hit_start, scan->hit_end).
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/*
	 * Relink @node after its former predecessor: node->node_list.prev
	 * still points at it, since drm_mm_scan_add_block() unlinked the
	 * node without clearing its pointers.  The BUG_ON verifies the
	 * list was not modified in between.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
811
812
813
814
815
816
817
818
819
820
821
822
/**
 * drm_mm_scan_color_evict - evict an overlapping neighbour of the hit hole
 * @scan: drm_mm scan whose hit window was found
 *
 * After a successful scan the chosen hole must also satisfy
 * mm->color_adjust; this returns the next neighbouring node to evict
 * for that, or NULL when the colored hole already fits the hit window.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * NOTE(review): assumes the hole containing the hit window is at
	 * the head of the hole_stack (most recently created) -- the
	 * BUG_ONs below verify it spans the hit window.
	 */
	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = hole_start + hole->hole_size;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	/* Color clipped the hole below the hit window: evict a neighbour. */
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);
850
851
852
853
854
855
856
857
858
/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the managed address range
 * @size: size of the managed address range (must not wrap)
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT;
	mm->holes_size = RB_ROOT;
	mm->holes_addr = RB_ROOT;

	/*
	 * The head_node is a sentinel occupying the wrap-around
	 * complement of the managed range (start = start + size,
	 * size = -size), so the hole that follows it spans exactly
	 * [start, start + size) and the node_list needs no special
	 * cases.
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = false;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);
881
882
883
884
885
886
887
888
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to destroy
 *
 * Warns (and, with CONFIG_DRM_DEBUG_MM, prints the owners of leaked
 * nodes) if the allocator is not empty.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (drm_mm_clean(mm))
		return;

	WARN(1, "Memory manager not clean during takedown.\n");
	show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);
896
897static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
898{
899 u64 start, size;
900
901 size = entry->hole_size;
902 if (size) {
903 start = drm_mm_hole_node_start(entry);
904 drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
905 start, start + size, size);
906 }
907
908 return size;
909}
910
911
912
913
914
915void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
916{
917 const struct drm_mm_node *entry;
918 u64 total_used = 0, total_free = 0, total = 0;
919
920 total_free += drm_mm_dump_hole(p, &mm->head_node);
921
922 drm_mm_for_each_node(entry, mm) {
923 drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
924 entry->start + entry->size, entry->size);
925 total_used += entry->size;
926 total_free += drm_mm_dump_hole(p, entry);
927 }
928 total = total_free + total_used;
929
930 drm_printf(p, "total: %llu, used %llu free %llu\n", total,
931 total_used, total_free);
932}
933EXPORT_SYMBOL(drm_mm_print);
934