#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"

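/*
 * amdgpu_gem_object_free - GEM free callback
 *
 * Drops the MMU notifier registration (if any) and the last reference
 * to the underlying amdgpu_bo.
 */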
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

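/*
 * amdgpu_gem_object_create - allocate a backing BO for a GEM object
 *
 * On allocation failure this first retries without CPU access and then
 * with GTT as an additional domain for VRAM-only requests, so memory
 * pressure does not immediately fail the allocation.
 */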
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}

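/*
 * amdgpu_gem_force_release - drop all GEM handles on device teardown
 *
 * Walks every open DRM file and releases any objects still in its
 * handle table; reaching this point with live clients is a bug, hence
 * the WARN_ONCE.
 */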
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create() on both the GEM create and open
 * ioctl paths; creates or references the bo_va for this file's VM.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	/* userptr BOs may only be used from the mm they were created in */
	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	/* per-VM BOs share the reservation object of their VM's root PD */
	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

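/*
 * amdgpu_gem_object_close - GEM close callback
 *
 * Drops the bo_va reference for this file's VM and, once the last
 * reference is gone, clears the freed mappings from the page tables.
 */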
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because we fail to reserve bo (%d)\n",
			r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page tables on GEM object close (%d)\n",
					r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	/* GDS/GWS/OA BOs have no CPU-accessible backing store */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* a GDS BO created from user space must be passed
			 * through the BO list, it can't be per-VM
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		/* share the root PD's reservation object so the BO stays
		 * always valid inside this VM
		 */
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

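/*
 * amdgpu_gem_userptr_ioctl - wrap a user space address range in a BO
 *
 * The resulting BO lives in GTT and is backed by the caller's own
 * pages; writable mappings additionally require an MMU notifier
 * registration.
 */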
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

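/*
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a BO handle
 *
 * Userptr BOs and BOs created with NO_CPU_ACCESS can't be CPU mapped,
 * so those are rejected with -EPERM.
 */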
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

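/*
 * amdgpu_gem_mmap_ioctl - DRM_AMDGPU_GEM_MMAP: thin wrapper that
 * returns the fake mmap offset via amdgpu_mode_dumb_mmap().
 */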
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned-to-signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

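/*
 * amdgpu_gem_wait_idle_ioctl - wait for all fences on a BO to signal
 *
 * A timeout of zero effectively polls; the status returned to user
 * space is 1 if the BO is still busy and 0 if it is idle.
 */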
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

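/*
 * amdgpu_gem_metadata_ioctl - get or set the tiling flags and opaque
 * metadata blob attached to a BO (e.g. surface layout information
 * shared between user space drivers).
 */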
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors here are
 * not fatal to the ioctl and are only reported.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_directories(adev, vm);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

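/*
 * amdgpu_gem_va_ioctl - map/unmap a BO (or PRT range) in the process VM
 *
 * Validates the requested virtual address and flags, reserves the BO
 * together with the page directory and then applies the requested
 * mapping operation.
 */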
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

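/*
 * amdgpu_gem_op_ioctl - query creation info or change the preferred
 * placement domains of an existing BO.
 */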
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		/* reject the change if the BO is mapped into a VM of a
		 * device in the same XGMI hive
		 */
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

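/*
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * Computes pitch and size from the requested geometry and allocates a
 * CPU-accessible BO in a domain suitable for display.
 */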
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * that is only possible if the buffer functions are enabled;
	 * otherwise requesting VRAM_CLEARED would fail the allocation.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
		seq_printf((m), " " #flag);		\
	}

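/*
 * amdgpu_debugfs_gem_bo_info - print one line of placement, pin count,
 * dma-buf state and creation flags for a BO; used as idr_for_each()
 * callback from amdgpu_debugfs_gem_info().
 */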
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->gem_base.dma_buf);
	attachment = READ_ONCE(bo->gem_base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}

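/*
 * amdgpu_debugfs_gem_info - dump every GEM object of every open file,
 * exposed as amdgpu_gem_info in debugfs.
 */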
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}