/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/drm_cache.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_trace.h"
#include "radeon_ttm.h"

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions below call it.
 */

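/*
 * radeon_update_memory_usage - track per-domain memory usage
 *
 * Adjusts the device's GTT or VRAM usage counter by the BO's size
 * whenever a buffer enters (sign > 0) or leaves (sign < 0) the
 * corresponding TTM memory type.
 */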
static void radeon_update_memory_usage(struct ttm_buffer_object *bo,
				       unsigned int mem_type, int sign)
{
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(bo->base.size, &rdev->gtt_usage);
		else
			atomic64_sub(bo->base.size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(bo->base.size, &rdev->vram_usage);
		else
			atomic64_sub(bo->base.size, &rdev->vram_usage);
		break;
	}
}

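/*
 * radeon_ttm_bo_destroy - TTM destroy callback for a radeon BO
 *
 * Drops the BO from the device's GEM object list, releases any tiling
 * surface register, tears down a dma-buf import if present and frees
 * the embedded GEM object together with the radeon_bo itself.
 */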
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

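/*
 * radeon_ttm_bo_is_radeon_bo - check whether a TTM BO is a radeon BO
 *
 * Identifies radeon BOs by their destroy callback, which is only ever
 * set to radeon_ttm_bo_destroy for objects created by this driver.
 */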
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &radeon_ttm_bo_destroy;
}

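/*
 * radeon_ttm_placement_from_domain - build a TTM placement list
 *
 * Translates a RADEON_GEM_DOMAIN_* mask into the rbo->placements array,
 * in the order VRAM, GTT, CPU, falling back to system memory when no
 * domain bit is set. VRAM placements that need CPU access are clamped
 * to the CPU-visible part of VRAM.
 */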
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c].mem_type = TTM_PL_VRAM;
			rbo->placements[c++].flags = 0;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_VRAM;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_TT;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

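/*
 * radeon_bo_create - allocate and initialize a radeon buffer object
 *
 * Rounds the size up to a whole number of pages, picks the TTM BO type
 * (kernel, sg or device), applies the GTT caching quirks below and
 * validates the BO into @domain. On success *bo_ptr holds the new BO.
 */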
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct dma_resv *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocation are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, sg, resv,
			&radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

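/*
 * radeon_bo_kmap - map a reserved BO into kernel address space
 *
 * Maps the whole object through TTM and caches the kernel virtual
 * address in bo->kptr so repeated calls are cheap. If @ptr is non-NULL
 * it receives the mapping. Returns 0 on success or a negative errno.
 */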
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

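/*
 * radeon_bo_kunmap - drop the kernel mapping of a BO
 *
 * Counterpart to radeon_bo_kmap(); a no-op when the BO was never mapped.
 */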
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

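/*
 * radeon_bo_unref - drop a reference and clear the caller's pointer
 *
 * The BO is freed through radeon_ttm_bo_destroy() once the last
 * reference is gone; *bo is set to NULL either way.
 */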
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

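/*
 * radeon_bo_pin_restricted - pin a BO into @domain below @max_offset
 *
 * Userptr BOs cannot be pinned, and dma-buf imports cannot be pinned to
 * VRAM. Already pinned BOs only get their pin count bumped; otherwise
 * the BO is validated into the requested domain, optionally restricted
 * to the range below @max_offset (0 means no restriction), and the GPU
 * address is returned through @gpu_addr.
 */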
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
		return -EPERM;

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as dma-buf cannot be moved to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

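/*
 * radeon_bo_unpin - drop one pin reference
 *
 * When the last pin goes away, the pinned-size accounting for VRAM or
 * GART is updated to match.
 */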
void radeon_bo_unpin(struct radeon_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
	if (!bo->tbo.pin_count) {
		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	}
}

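/*
 * radeon_bo_evict_vram - evict all buffers from VRAM
 *
 * Used on suspend/teardown. IGP chips without sideport memory keep
 * their buffers in place, since their "VRAM" is carved out of system
 * RAM anyway.
 */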
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	struct ttm_device *bdev = &rdev->mman.bdev;
	struct ttm_resource_manager *man;

	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
#ifndef CONFIG_HIBERNATION
	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
#endif
	man = ttm_manager_type(bdev, TTM_PL_VRAM);
	if (!man)
		return 0;
	return ttm_resource_manager_evict_all(bdev, man);
}

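/*
 * radeon_bo_force_delete - drop leaked userspace objects at teardown
 *
 * Complains about every GEM object userspace failed to release, unlinks
 * it from the device list and drops the final reference.
 */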
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

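/*
 * radeon_bo_init - set up VRAM caching attributes and the TTM backend
 *
 * Reserves the VRAM aperture as write-combined (PAT) and, unless the
 * fastfb path already covers it, adds an MTRR for the aperture before
 * handing off to radeon_ttm_init().
 */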
int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * disable a lot of relocations.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

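/*
 * radeon_bo_list_validate - validate a list of BOs for command submission
 *
 * Reserves every BO on @head, then validates each unpinned BO into its
 * preferred domain, falling back to the allowed domains (and limiting
 * how many bytes may be moved per IB, see above) before recording the
 * resulting GPU offset and tiling flags for the CS path.
 */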
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->tbo.pin_count) {
			u32 domain = lobj->preferred_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.resource->mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 &&
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

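/*
 * radeon_bo_get_surface_reg - bind a tiled BO to a surface register
 *
 * Reuses the register already assigned to the BO if any, otherwise
 * takes a free one or steals the register of an unpinned BO, then
 * programs it with the BO's tiling parameters. Requires the BO to be
 * reserved.
 */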
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->tbo.pin_count == 0)
			steal = i;
	}

	/* if we are all out, steal one */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;

		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.resource->start << PAGE_SHIFT,
			       bo->tbo.base.size);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

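/*
 * radeon_bo_set_tiling_flags - validate and store tiling parameters
 *
 * On evergreen and newer (CHIP_CEDAR+) the bank width/height, macro
 * tile aspect and (stencil) tile split fields are range-checked before
 * the flags and pitch are stored under the BO reservation.
 */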
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

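/*
 * radeon_bo_check_tiling - keep the surface register in sync with the BO
 *
 * Called on map, unmap and move. Drops the surface register when the
 * surface is force-dropped or leaves VRAM, and (re)acquires one when a
 * RADEON_TILING_SURFACE BO has moved within VRAM.
 */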
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		dma_resv_assert_held(bo->tbo.base.resv);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

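/*
 * radeon_bo_move_notify - TTM move callback
 *
 * Rebalances the per-domain usage counters from @old_type to the new
 * resource, then drops the surface register and invalidates the VM
 * mappings of the moved BO.
 */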
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   unsigned int old_type,
			   struct ttm_resource *new_mem)
{
	struct radeon_bo *rbo;

	radeon_update_memory_usage(bo, old_type, -1);
	if (new_mem)
		radeon_update_memory_usage(bo, new_mem->mem_type, 1);

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

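/*
 * radeon_bo_fault_reserve_notify - handle a CPU fault on a BO
 *
 * A fault on VRAM outside the CPU-visible aperture cannot be mapped, so
 * the BO is revalidated into visible VRAM, falling back to GTT under
 * memory pressure. Pinned BOs cannot be moved and take SIGBUS instead.
 */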
vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->resource->mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->resource->num_pages << PAGE_SHIFT;
	offset = bo->resource->start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	} else if (likely(!r)) {
		offset = bo->resource->start << PAGE_SHIFT;
		/* this should never happen */
		if ((offset + size) > rdev->mc.visible_vram_size)
			return VM_FAULT_SIGBUS;
	}

	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, &fence->base);
	else
		dma_resv_add_excl_fence(resv, &fence->base);
}