/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"
struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

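/**
 * radeon_gem_fault - VM fault handler for radeon GEM mappings
 * @vmf: fault information
 *
 * Fills in faulting pages of a mapped radeon BO via the common TTM
 * fault helpers. rdev->pm.mclk_lock is held for read so that a
 * concurrent memory reclock cannot move the BO while the fault is
 * being serviced.
 */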
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

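/* GEM free callback: unregister any userptr MMU notifier and drop the
 * driver's reference to the underlying BO.
 */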
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

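/**
 * radeon_gem_object_create - allocate a radeon BO and wrap it in a GEM object
 * @rdev: radeon device
 * @size: requested size in bytes
 * @alignment: requested alignment, clamped to at least one page
 * @initial_domain: preferred placement (VRAM, GTT or CPU)
 * @flags: RADEON_GEM_* creation flags
 * @kernel: whether the BO is for kernel-internal use
 * @obj: where the new GEM object is returned
 *
 * Sizes above the unpinned GTT size are rejected, since the GTT must be
 * able to hold the BO during VRAM<->system migrations. A failed VRAM
 * allocation transparently falls back to GTT unless it was interrupted
 * by a signal. Returns 0 on success or a negative error code.
 */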
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

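/**
 * radeon_gem_set_domain - handle a userspace domain-change request
 * @gobj: GEM object
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * For CPU access this waits (up to 30 seconds) for pending GPU work on
 * the BO to finish. Migrating a prime-shared BO to VRAM is rejected.
 */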
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which runs in both the create and
 * the open ioctl paths.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

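/**
 * radeon_gem_object_close - drop a per-file VM mapping of a BO
 * @obj: GEM object whose handle is being closed
 * @file_priv: DRM file the handle belonged to
 *
 * Undoes radeon_gem_object_open(): drops the bo_va reference and
 * removes the mapping from the file's VM once the count hits zero.
 */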
static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

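/**
 * radeon_gem_handle_lockup - turn a detected GPU lockup into a reset
 * @rdev: radeon device
 * @r: error code returned by a GEM operation
 *
 * -EDEADLK signals a GPU lockup; reset the GPU and, if the reset
 * succeeds, return -EAGAIN so userspace retries the call.
 */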
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

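/*
 * GEM mmap callback. Userptr BOs are backed by pages that are already
 * mapped in the owner's address space, so mapping them again through
 * GEM is refused; everything else goes through the common TTM helper.
 */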
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

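/**
 * radeon_gem_create_ioctl - DRM_IOCTL_RADEON_GEM_CREATE
 *
 * Allocates a BO of the requested (page-rounded) size in the requested
 * domain and returns a handle to it, holding the exclusive lock for
 * read so the allocation cannot race with a GPU reset.
 *
 * Illustrative userspace usage (sketch only, error handling omitted):
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = bo_size,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_VRAM,
 *	};
 *	ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req);
 *	// on success, req.handle names the new BO
 */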
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

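/**
 * radeon_gem_userptr_ioctl - DRM_IOCTL_RADEON_GEM_USERPTR
 *
 * Wraps a page-aligned range of user memory in a GEM object so the GPU
 * can access it through the GTT. Writable userptrs must be anonymous
 * memory and have an MMU notifier registered, so the driver learns
 * when the backing pages are about to move.
 */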
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		 * memory and install an MMU notifier
		 */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is cached and synchronized */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

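/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset of a BO
 * @filp: DRM file the handle belongs to
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: returned offset to pass to mmap() on the DRM fd
 *
 * Userptr BOs cannot be mapped this way and yield -EPERM.
 */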
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

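/**
 * radeon_gem_busy_ioctl - DRM_IOCTL_RADEON_GEM_BUSY
 *
 * Non-blocking check whether any GPU work on the BO is still pending:
 * returns -EBUSY if so, 0 if idle, and reports the BO's current
 * placement domain either way.
 */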
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

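/**
 * radeon_gem_wait_idle_ioctl - DRM_IOCTL_RADEON_GEM_WAIT_IDLE
 *
 * Blocks (up to 30 seconds) until all fences on the BO have signaled,
 * then flushes the HDP cache for VRAM BOs so CPU reads observe the
 * GPU's writes.
 */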
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS
		 */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

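/**
 * radeon_gem_va_ioctl - DRM_IOCTL_RADEON_GEM_VA
 *
 * Maps or unmaps a BO at a GPU virtual address within the file's VM
 * (Cayman and newer only). The address must lie outside the reserved
 * area, and the VALID/SYSTEM page flags are owned by the kernel and
 * rejected if set by userspace.
 */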
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet; to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value so that
	 * the field can be used later without breaking existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* Don't remove: userspace must be forced to set the snooped flag,
	 * otherwise we will end up with broken userspace and won't be able
	 * to enable this feature without adding a new interface.
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

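/**
 * radeon_gem_op_ioctl - DRM_IOCTL_RADEON_GEM_OP
 *
 * Gets or sets a BO's initial placement domain. Userptr BOs are
 * refused since their backing memory is owned by the user mapping.
 */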
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

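/**
 * radeon_mode_dumb_create - create a dumb BO for scanout
 *
 * Computes a hardware-aligned pitch and page-aligned size for the
 * requested width/height/bpp, then allocates a VRAM BO and returns a
 * handle to it.
 */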
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}