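/*
 * drm_mm: generic range allocator used by DRM drivers.
 *
 * Allocations are tracked as struct drm_mm_node entries embedded in the
 * caller's own structures; free space is tracked as "holes" hanging off
 * the allocated node that precedes them.
 */
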
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
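
/*
 * Overview: drm_mm is a simple range allocator with eviction support.
 * Allocated nodes are kept in a list sorted by address; each free hole
 * hangs off the allocated node in front of it and is indexed three ways:
 * a size-ordered rbtree (holes_size), an address-ordered rbtree
 * (holes_addr) and an eviction LRU list (hole_stack). An interval tree
 * over all allocated nodes supports fast range lookups.
 */
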
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under a spinlock, so avoid a sleeping allocation. */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	/* Descending order: the largest hole is the leftmost (cached) entry. */
	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	/* Walk towards the smallest hole that still fits @size. */
	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}
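
/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert, with start, size and color already set
 *
 * Reserves the exact range described by @node. This is useful to restore
 * preallocated blocks (for example a firmware-initialized framebuffer)
 * into the allocator.
 *
 * Returns:
 * 0 on success, -ENOSPC if the range overlaps an existing allocation or
 * does not fit inside a suitable hole.
 */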
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the hole that contains the start of the requested range. */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->allocated = true;
	node->hole_size = 0;

	/* Re-create the holes left over on either side of the new node. */
	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}
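
/**
 * drm_mm_insert_node_in_range - ranged search for space and allocate @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value for this node, passed to mm->color_adjust
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the search and placement
 *
 * Returns:
 * 0 on success, -ENOSPC if there is no suitable hole.
 */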
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start >= range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	/* The largest hole is cached as the first entry of holes_size. */
	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				/* Round to the alignment: down for top-down
				 * allocations, up otherwise, then recheck
				 * that the aligned block still fits.
				 */
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
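
/**
 * drm_mm_remove_node - remove a memory node from the allocator
 * @node: drm_mm_node to remove
 *
 * Removes @node from the allocator, joining the freed range with any
 * neighbouring holes. The node must be fully inserted and must not be
 * part of an active eviction scan.
 */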
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	/* Merge the freed range into the hole in front of it. */
	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);
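
/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * Swaps a registered node out of the allocator in favour of a fresh one,
 * preserving its exact position and hole bookkeeping. This is useful for
 * atomically swapping the structure an allocation is embedded in.
 */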
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!old->allocated);

	*new = *old;

	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	old->allocated = false;
	new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);
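
/*
 * Eviction scanning: when the allocator is too fragmented to satisfy a
 * request, drivers can build an eviction roster:
 *
 * 1. Initialize a scan with drm_mm_scan_init_with_range().
 * 2. Add nodes in LRU order with drm_mm_scan_add_block() until it
 *    reports that a suitable hole has been found.
 * 3. Roll back with drm_mm_scan_remove_block() in the exact reverse
 *    order; nodes for which it returns true overlap the hole and must
 *    be evicted, the rest can be kept.
 * 4. Evict the flagged nodes with drm_mm_remove_node(), then allocate.
 */

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value for this node, passed to mm->color_adjust
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the search and placement
 *
 * Only one scan may be active on an allocator at a time, and while it is
 * active no other insert or remove operations may be performed on @mm.
 */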
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);
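
/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scan
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */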
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/*
	 * Temporarily unlink the node so the holes on either side of it
	 * merge into one larger hole. The list pointers are deliberately
	 * left intact so drm_mm_scan_remove_block() can splice the node
	 * back in afterwards.
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
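
/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scan
 * @node: drm_mm_node to remove
 *
 * Nodes must be removed in the exact reverse order they were added to the
 * scan list, and all added nodes must be removed before the scan result
 * is used.
 *
 * Returns:
 * True if @node overlaps the hole found by the scan and must be evicted
 * to make room, false if it may be kept.
 */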
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/*
	 * During the scan the node was unlinked without poisoning its list
	 * pointers, so they still point at its old neighbours. Because
	 * blocks are removed in the exact reverse order they were added,
	 * those neighbours are adjacent again and the node can simply be
	 * spliced back in behind its previous node.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
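
/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, the
 * color_adjust callback may shrink the hole beyond the scanned region.
 * This returns a neighbouring node that must also be evicted to make the
 * colored hole usable, or NULL if no further eviction is required.
 */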
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first
	 * element in the hole_stack list, but due to side effects in the
	 * driver it may not be; search the list for it.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after the scan found a hole. */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);
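
/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */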
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* The head_node wraps around the managed range (note the negative
	 * size), which avoids any special case in the free-hole tracking.
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = false;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);
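
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to destroy
 *
 * Warns (and, with CONFIG_DRM_DEBUG_MM, dumps the saved insertion stack
 * traces) if any allocations are still present.
 */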
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}
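
/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */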
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);