#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/drm_cache.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_trace.h"
#include "radeon_ttm.h"

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * Concurrent BO access is excluded through bo_reserve (dma_resv) locking;
 * the functions below rely on the caller holding the reservation where needed.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
		else
			atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
		else
			atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
		break;
	}
}

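/* TTM destroy callback: drop the usage statistics, GEM bookkeeping and the
 * surface register before the BO itself is freed.
 */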
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

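/* Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement list used for
 * validation; falls back to system memory when no domain bit is set.
 */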
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c].mem_type = TTM_PL_VRAM;
			rbo->placements[c++].flags = 0;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_VRAM;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_TT;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

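/* Allocate and initialize a new radeon BO of @size bytes in @domain.
 *
 * Illustrative call (error handling omitted, variable names hypothetical):
 *
 *	struct radeon_bo *bo;
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, &bo);
 */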
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct dma_resv *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;

	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT are known to cause GPU hangs
	 * on RV6xx parts, so force them off there.
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* Write-combined CPU mappings of GTT are known to be problematic on
	 * 32-bit x86, so don't use them there.
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work (no PAT);
	 * things may get noticeably slower otherwise.
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support write-combined memory,
	 * mask out the WC flag from the BO.
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, sg, resv,
			&radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

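/* Map the whole BO into kernel address space; repeated calls return the
 * cached pointer until radeon_bo_kunmap() is called.
 */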
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

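/* Pin a BO into @domain, optionally below @max_offset, and return its GPU
 * address.  Pinning an already pinned BO only bumps the pin count.
 *
 * Illustrative (via the unrestricted wrapper below):
 *
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 */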
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
		return -EPERM;

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

void radeon_bo_unpin(struct radeon_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
	if (!bo->tbo.pin_count) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	}
}

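/* Evict all buffers from VRAM (typically on suspend or teardown); a no-op
 * when there is no VRAM manager.
 */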
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	struct ttm_device *bdev = &rdev->mman.bdev;
	struct ttm_resource_manager *man;

	/* No need to evict on IGPs without sideport memory unless we are
	 * built with hibernation support.
	 */
#ifndef CONFIG_HIBERNATION
	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
#endif
	man = ttm_manager_type(bdev, TTM_PL_VRAM);
	if (!man)
		return 0;
	return ttm_resource_manager_evict_all(bdev, man);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* dropping the GEM reference also releases the TTM BO */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM is allowed to move per command submission. */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* The threshold is derived from the current VRAM usage:
	 *
	 * - with an empty VRAM it starts at one quarter of VRAM,
	 * - it decreases linearly as usage grows,
	 * - once half of VRAM is in use it bottoms out at 1 MB.
	 *
	 * This is a threshold, not a hard limit: a buffer of any size can
	 * still be moved as long as the threshold isn't crossed before the
	 * move takes place, so relocation is throttled but never disabled
	 * entirely.  The intent is that buffers end up in VRAM at creation
	 * time and TTM only does a minimum amount of shuffling during command
	 * submission.  Note that the usage counter is only approximate, so
	 * the result can be somewhat off under memory pressure.
	 */
	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

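/* Reserve and validate every BO on @head for a command submission, moving
 * buffers between domains only while the per-IB move budget allows it.
 */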
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->tbo.pin_count) {
			u32 domain = lobj->preferred_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move
			 * it if we have already moved too many buffers for
			 * this IB.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account; we don't want to disallow buffer
			 * moves completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 &&
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

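/* Find (or steal) a hardware surface register describing the tiling of this
 * BO and program it; the BO must be reserved by the caller.
 */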
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->tbo.pin_count == 0)
			steal = i;
	}

	/* all surface registers are in use, steal one from an unpinned BO */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;

		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the victim's CPU mappings */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.base.size);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

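/* Validate and store user supplied tiling parameters; the extra sanity
 * checks only apply to Evergreen (CEDAR) and newer ASICs.
 */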
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

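/* Re-evaluate the surface register of a tiled BO after it moved or was
 * mapped: drop the register outside VRAM, (re)acquire it inside VRAM.
 */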
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		dma_resv_assert_held(bo->tbo.base.resv);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

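/* Page fault helper: make sure a faulting VRAM BO is CPU visible, falling
 * back to GTT when it doesn't fit into the visible window.
 */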
vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* The BO lies outside the CPU-visible part of VRAM, try to move it in */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	} else if (likely(!r)) {
		offset = bo->mem.start << PAGE_SHIFT;
		/* this should never happen */
		if ((offset + size) > rdev->mc.visible_vram_size)
			return VM_FAULT_SIGBUS;
	}

	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, &fence->base);
	else
		dma_resv_add_excl_fence(resv, &fence->base);
}