#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

/* Default destructor, used when the driver passes no destroy callback. */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int pos;

	pos = ffs(place->flags & TTM_PL_MASK_MEM);
	if (unlikely(!pos))
		return -EINVAL;

	*mem_type = pos - 1;
	return 0;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_printer p = drm_debug_printer(TTM_PFX);

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, &p);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%d\n",
			atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
		container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(kref_read(&bo->list_kref));
	BUG_ON(kref_read(&bo->kref));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));
	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->bdev->glob->bo_count);
	dma_fence_put(bo->moving);
	reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	bo->destroy(bo);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	reservation_object_assert_held(bo->resv);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru[bo->priority]);
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags &
				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
			list_add_tail(&bo->swap,
				      &bdev->glob->swap_lru[bo->priority]);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
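
/*
 * Usage sketch (hypothetical caller, not part of this file): the LRU
 * helpers assume the caller holds both the bo's reservation and the
 * global LRU spinlock, e.g. when re-adding a buffer before unreserving:
 *
 *	spin_lock(&bo->bdev->glob->lru_lock);
 *	ttm_bo_add_to_lru(bo);
 *	spin_unlock(&bo->bdev->glob->lru_lock);
 *	reservation_object_unlock(bo->resv);
 */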

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
				     struct ttm_buffer_object *bo)
{
	if (!pos->first)
		pos->first = bo;
	pos->last = bo;
}

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
			     struct ttm_lru_bulk_move *bulk)
{
	reservation_object_assert_held(bo->resv);

	ttm_bo_del_from_lru(bo);
	ttm_bo_add_to_lru(bo);

	if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		switch (bo->mem.mem_type) {
		case TTM_PL_TT:
			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
			break;

		case TTM_PL_VRAM:
			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
			break;
		}
		if (bo->ttm && !(bo->ttm->page_flags &
				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
			ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
	}
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
		struct ttm_mem_type_manager *man;

		if (!pos->first)
			continue;

		reservation_object_assert_held(pos->first->resv);
		reservation_object_assert_held(pos->last->resv);

		man = &pos->first->bdev->man[TTM_PL_TT];
		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
				    &pos->last->lru);
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
		struct ttm_mem_type_manager *man;

		if (!pos->first)
			continue;

		reservation_object_assert_held(pos->first->resv);
		reservation_object_assert_held(pos->last->resv);

		man = &pos->first->bdev->man[TTM_PL_VRAM];
		list_bulk_move_tail(&man->lru[i], &pos->first->lru,
				    &pos->last->lru);
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
		struct list_head *lru;

		if (!pos->first)
			continue;

		reservation_object_assert_held(pos->first->resv);
		reservation_object_assert_held(pos->last->resv);

		lru = &pos->first->bdev->glob->swap_lru[i];
		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
	}
}
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
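
/*
 * Usage sketch (hypothetical driver code): batch LRU updates for many BOs
 * that share one reservation object (e.g. per-VM BOs) and apply them with
 * a single bulk move. vm_bo_list/vm_entry are assumed driver-side names;
 * the caller must hold the shared reservation and the LRU lock.
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	memset(&bulk, 0, sizeof(bulk));
 *	spin_lock(&glob->lru_lock);
 *	list_for_each_entry(bo, &vm_bo_list, vm_entry)
 *		ttm_bo_move_to_lru_tail(bo, &bulk);
 *	ttm_bo_bulk_move_lru_tail(&bulk);
 *	spin_unlock(&glob->lru_lock);
 */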

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem, bool evict,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_tt_create(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem, ctx);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, ctx, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, ctx, mem);
	else
		ret = ttm_bo_move_memcpy(bo, ctx, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			swap(*mem, bo->mem);
			bdev->driver->move_notify(bo, false, mem);
			swap(*mem, bo->mem);
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node)
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
			bdev->man[bo->mem.mem_type].gpu_offset;
	else
		bo->offset = 0;

	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_tt_destroy(bo->ttm);
	bo->ttm = NULL;
	ttm_bo_mem_put(bo, &bo->mem);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->resv == &bo->ttm_resv)
		return 0;

	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
	if (r)
		reservation_object_unlock(&bo->ttm_resv);

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i;

	fobj = reservation_object_get_list(&bo->ttm_resv);
	fence = reservation_object_get_excl(&bo->ttm_resv);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	ret = ttm_bo_individualize_resv(bo);
	if (ret) {
		/* Last resort, if we fail to allocate memory for the
		 * fences block for the BO to become idle
		 */
		reservation_object_wait_timeout_rcu(bo->resv, true, false,
						    30 * HZ);
		spin_lock(&glob->lru_lock);
		goto error;
	}

	spin_lock(&glob->lru_lock);
	ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
	if (!ret) {
		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
			ttm_bo_del_from_lru(bo);
			spin_unlock(&glob->lru_lock);
			if (bo->resv != &bo->ttm_resv)
				reservation_object_unlock(&bo->ttm_resv);

			ttm_bo_cleanup_memtype_use(bo);
			reservation_object_unlock(bo->resv);
			return;
		}

		ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		reservation_object_unlock(bo->resv);
	}
	if (bo->resv != &bo->ttm_resv)
		reservation_object_unlock(&bo->ttm_resv);

error:
	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_bo_cleanup_refs - If bo idle, remove from delayed- and lru lists,
 * and unref. If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock.
 *
 * @bo: The buffer object to clean up
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv: Unlock the reservation lock as well.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct ttm_bo_global *glob = bo->bdev->glob;
	struct reservation_object *resv;
	int ret;

	if (unlikely(list_empty(&bo->ddestroy)))
		resv = bo->resv;
	else
		resv = &bo->ttm_resv;

	if (reservation_object_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		if (unlock_resv)
			reservation_object_unlock(bo->resv);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(resv, true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		if (unlock_resv && !reservation_object_trylock(bo->resv)) {
			/*
			 * We raced, and lost, someone else holds the reservation now,
			 * and is probably busy in ttm_bo_cleanup_memtype_use.
			 *
			 * Even if it's not the case, because we finished waiting any
			 * delayed destruction would succeed, so just return success
			 * here.
			 */
			spin_unlock(&glob->lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		if (unlock_resv)
			reservation_object_unlock(bo->resv);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		reservation_object_unlock(bo->resv);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct list_head removed;
	bool empty;

	INIT_LIST_HEAD(&removed);

	spin_lock(&glob->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		struct ttm_buffer_object *bo;

		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		kref_get(&bo->list_kref);
		list_move_tail(&bo->ddestroy, &removed);

		if (remove_all || bo->resv != &bo->ttm_resv) {
			spin_unlock(&glob->lru_lock);
			reservation_object_lock(bo->resv, NULL);

			spin_lock(&glob->lru_lock);
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);

		} else if (reservation_object_trylock(bo->resv)) {
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
		} else {
			spin_unlock(&glob->lru_lock);
		}

		kref_put(&bo->list_kref, ttm_bo_release_list);
		spin_lock(&glob->lru_lock);
	}
	list_splice_tail(&removed, &bdev->ddestroy);
	empty = list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return empty;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
		container_of(work, struct ttm_bo_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
		container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);
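
/*
 * Usage sketch (hypothetical caller): a successful ttm_bo_create() hands
 * back one reference; ttm_bo_put() drops it, and the final put frees the
 * object through bo->destroy.
 *
 *	ret = ttm_bo_create(bdev, size, ttm_bo_type_kernel, &placement,
 *			    0, false, &bo);
 *	if (ret)
 *		return ret;
 *	...use the buffer...
 *	ttm_bo_put(bo);
 */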

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	ttm_bo_put(bo);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	reservation_object_assert_held(bo->resv);

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);

	if (!placement.num_placement && !placement.num_busy_placement) {
		ret = ttm_bo_pipeline_gutting(bo);
		if (ret)
			return ret;

		return ttm_tt_create(bo, false);
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

/**
 * Check whether the target bo is allowed to be evicted or swapped out,
 * including the following cases:
 *
 * a. The bo shares the same reservation object with ctx->resv; the
 *    caller is assumed to hold it and to unreserve it afterwards.
 *
 * b. Otherwise, trylock the bo's reservation object.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
					   struct ttm_operation_ctx *ctx, bool *locked)
{
	bool ret = false;

	*locked = false;
	if (bo->resv == ctx->resv) {
		reservation_object_assert_held(bo->resv);
		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
		    || !list_empty(&bo->ddestroy))
			ret = true;
	} else {
		*locked = reservation_object_trylock(bo->resv);
		ret = *locked;
	}

	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo = NULL;
	bool locked = false;
	unsigned i;
	int ret;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
				continue;

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				if (locked)
					reservation_object_unlock(bo->resv);
				continue;
			}
			break;
		}

		/* If the inner loop terminated early, we have our candidate */
		if (&bo->lru != &man->lru[i])
			break;

		bo = NULL;
	}

	if (!bo) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked) {
		ttm_bo_unreserve(bo);
	} else {
		spin_lock(&glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&glob->lru_lock);
	}

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		reservation_object_add_shared_fence(bo->resv, fence);

		ret = reservation_object_reserve_shared(bo->resv);
		if (unlikely(ret))
			return ret;

		dma_fence_put(bo->moving);
		bo->moving = fence;
	}

	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	mem->mem_type = mem_type;
	return ttm_bo_add_move_fence(bo, man, mem);
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/*
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	ret = reservation_object_reserve_shared(bo->resv);
	if (unlikely(ret))
		return ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node) {
			ret = ttm_bo_add_move_fence(bo, man, mem);
			if (unlikely(ret)) {
				(*man->func->put_node)(man, mem);
				return ret;
			}
			break;
		}
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	reservation_object_assert_held(bo->resv);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_mem_reg *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (mem->mm_node && (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;
	uint32_t new_flags;

	reservation_object_assert_held(bo->resv);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, ctx);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the newly chosen placement to the current flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
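
/*
 * Usage sketch (hypothetical driver code): migrate an existing BO into a
 * new placement. my_vram_placement is an assumed driver-defined
 * struct ttm_placement; the reserve/validate/unreserve sequence is the
 * part TTM prescribes.
 *
 *	struct ttm_operation_ctx ctx = { true, false };
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, &my_vram_placement, &ctx);
 *	ttm_bo_unreserve(bo);
 */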

int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 struct ttm_operation_ctx *ctx,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct reservation_object *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->moving = NULL;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		reservation_object_assert_held(bo->resv);
	} else {
		bo->resv = &bo->ttm_resv;
	}
	reservation_object_init(&bo->ttm_resv);
	atomic_inc(&bo->bdev->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = reservation_object_trylock(bo->resv);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, ctx);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_put(bo);
		return ret;
	}

	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bdev->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bdev->glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, &ctx, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);
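
/*
 * Usage sketch (hypothetical driver code): embed a ttm_buffer_object in a
 * driver structure and initialize it. struct my_bo, my_placement and
 * my_destroy are assumed names; note that acc_size must be computed with
 * ttm_bo_acc_size() so the memory accounting stays balanced.
 *
 *	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
 *	ret = ttm_bo_init(bdev, &mybo->tbo, size, ttm_bo_type_device,
 *			  &my_placement, 0, true, acc_size,
 *			  NULL, NULL, my_destroy);
 */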

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
		  unsigned long size,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  bool interruptible,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.flags = TTM_OPT_FLAG_FORCE_ALLOC
	};
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
			if (ret)
				return ret;
			spin_lock(&glob->lru_lock);
		}
	}
	spin_unlock(&glob->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ret = ttm_bo_force_list_clean(bdev, mem_type);
		if (ret) {
			pr_err("Cleanup eviction failed\n");
			return ret;
		}

		ret = (*man->func->takedown)(man);
	}

	dma_fence_put(man->move);
	man->move = NULL;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
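
/*
 * Usage sketch (hypothetical driver code): evicting all of VRAM before
 * suspend, so buffer contents survive in system memory. mydev is an
 * assumed driver device structure embedding the ttm_bo_device.
 *
 *	ret = ttm_bo_evict_mm(&mydev->bdev, TTM_PL_VRAM);
 *	if (ret)
 *		pr_warn("VRAM eviction failed; buffer contents may be lost\n");
 */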

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret;
	struct ttm_mem_type_manager *man;
	unsigned i;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	spin_lock_init(&man->move_lock);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;
	unsigned i;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->mem_glob->bo_glob = glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&bdev->man[0].lru[0]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}

EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (reservation_object_test_signaled_rcu(bo->resv, true))
			return 0;
		else
			return -EBUSY;
	}

	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
						      interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(bo->resv, NULL);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
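
/*
 * Usage sketch (hypothetical caller): make sure the GPU is done with a
 * buffer before CPU access. The bo must be reserved across the wait.
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_wait(bo, true, false);
 *	ttm_bo_unreserve(bo);
 */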

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
{
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	bool locked;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
			if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
				ret = 0;
				break;
			}
		}
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/*
	 * Move to system cached
	 */

	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
	    bo->ttm->caching_state != tt_cached) {
		struct ttm_operation_ctx ctx = { false, false };
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
		if (unlikely(ret != 0))
			goto out;
	}

	/*
	 * Make sure BO is idle.
	 */

	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	if (locked)
		reservation_object_unlock(bo->resv);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = reservation_object_lock_interruptible(bo->resv, NULL);
	if (ret == -EINTR)
		ret = -ERESTARTSYS;
	if (unlikely(ret != 0))
		goto out_unlock;
	reservation_object_unlock(bo->resv);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}