#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>

#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "bif/bif_4_1_d.h"

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

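/**
 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
 * memory request.
 *
 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
 * @type: The type of memory requested
 * @man: The memory type manager for each domain
 *
 * This is called by ttm_bo_init_mm() when a buffer object is being
 * initialized.
 */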
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		/* GTT memory */
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

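/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) of the buffer
 *
 * Fill in placement data when ttm_bo_evict() is called.
 */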
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an amdgpu object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

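/**
 * amdgpu_verify_access - Verify access for a mmap call
 *
 * @bo: The buffer object to map
 * @filp: The file pointer from the process performing the mmap
 *
 * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO to their process space.
 */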
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
					  filp->private_data);
}

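/**
 * amdgpu_move_null - Register memory for a buffer object
 *
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */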
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

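/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 *
 * @bo: The bo to assign the memory to.
 * @mm_node: Memory manager node for drm allocator.
 * @mem: The region where the bo resides.
 */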
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}

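/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding
 * to @offset. It also modifies the offset to be within the drm_mm_node
 * returned.
 *
 * @mem: The region where the bo resides.
 * @offset: The offset that drm_mm_node is used for finding.
 */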
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

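/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 * @f: Returns the last fence if multiple jobs are submitted.
 */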
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
			 src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
			 dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
			src_page_offset = 0;
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
			dst_page_offset = 0;
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

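/**
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */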
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (ttm_to_amdgpu_bo(bo)->flags &
	     AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
				       NULL, &wipe_fence);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
	else
		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

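/**
 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
 *
 * Called by amdgpu_bo_move().
 */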
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);

	/* create space/pages for new_mem in GTT space */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit from VRAM\n");
		return r;
	}

	/* set caching flags */
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* Bind the memory to the GTT space */
	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* blit VRAM to GTT */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* move BO (in tmp_mem) to new_mem */
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

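/**
 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
 *
 * Called by amdgpu_bo_move().
 */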
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_ttm_adev(bo->bdev);

	/* make space in GTT for old_mem buffer */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit to VRAM\n");
		return r;
	}

	/* move/bind old memory to GTT space */
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* copy to VRAM */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

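/**
 * amdgpu_mem_visible - Check that memory is visible to the CPU
 *
 * Called by amdgpu_bo_move()
 */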
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_mem_reg *mem)
{
	struct drm_mm_node *nodes = mem->mm_node;

	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
		return true;
	if (mem->mem_type != TTM_PL_VRAM)
		return false;

	/* ttm_mem_reg_ioremap only supports contiguous memory */
	if (nodes->size != mem->num_pages)
		return false;

	return ((nodes->start + nodes->size) << PAGE_SHIFT)
		<= adev->gmc.visible_vram_size;
}

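/**
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */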
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA) {
		/* Nothing to save here */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled) {
		r = -ENODEV;
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		/* Check that all memory is CPU accessible */
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

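/**
 * amdgpu_ttm_io_mem_reserve - Reserve a block of VRAM for access by CPU
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_map_bo.
 */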
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

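/*
 * TTM backend functions.
 */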
struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	struct hmm_range	*range;
#endif
};

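/**
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_get_user_pages_done() once and
 * only once afterwards to stop HMM tracking
 */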
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)

#define MAX_RETRY_HMM_RANGE_FAULT	16

int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
	struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct mm_struct *mm = gtt->usertask->mm;
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct hmm_range *range;
	unsigned long i;
	uint64_t *pfns;
	int r = 0;

	if (!mm)
		return -ESRCH;

	if (unlikely(!mirror)) {
		DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
		r = -EFAULT;
		goto out;
	}

	vma = find_vma(mm, start);
	if (unlikely(!vma || start < vma->vm_start)) {
		r = -EFAULT;
		goto out;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		     vma->vm_file)) {
		r = -EPERM;
		goto out;
	}

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range)) {
		r = -ENOMEM;
		goto out;
	}

	pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
	if (unlikely(!pfns)) {
		r = -ENOMEM;
		goto out_free_ranges;
	}

	amdgpu_hmm_init_range(range);
	range->default_flags = range->flags[HMM_PFN_VALID];
	range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ?
				0 : range->flags[HMM_PFN_WRITE];
	range->pfn_flags_mask = 0;
	range->pfns = pfns;
	range->start = start;
	range->end = start + ttm->num_pages * PAGE_SIZE;

	hmm_range_register(range, mirror);

	/*
	 * Just wait for range to be valid, safe to ignore return value as we
	 * will use the return value to determine if we need to retry or not
	 */
	hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);

	down_read(&mm->mmap_sem);
	r = hmm_range_fault(range, 0);
	up_read(&mm->mmap_sem);

	if (unlikely(r < 0))
		goto out_free_pfns;

	for (i = 0; i < ttm->num_pages; i++) {
		pages[i] = hmm_device_entry_to_page(range, pfns[i]);
		if (unlikely(!pages[i])) {
			pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
			       i, pfns[i]);
			r = -ENOMEM;

			goto out_free_pfns;
		}
	}

	gtt->range = range;

	return 0;

out_free_pfns:
	hmm_range_unregister(range);
	kvfree(pfns);
out_free_ranges:
	kfree(range);
out:
	return r;
}

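/**
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
 * Check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
 */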
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool r = false;

	if (!gtt || !gtt->userptr)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
			 gtt->userptr, ttm->num_pages);

	WARN_ONCE(!gtt->range || !gtt->range->pfns,
		  "No user pages to check\n");

	if (gtt->range) {
		r = hmm_range_valid(gtt->range);
		hmm_range_unregister(gtt->range);

		kvfree(gtt->range->pfns);
		kfree(gtt->range);
		gtt->range = NULL;
	}

	return r;
}
#endif

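/**
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Sets the page array that backs user memory; passing NULL clears it.
 */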
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}

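/**
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 */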
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

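/**
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */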
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	sg_free_table(ttm->sg);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	if (gtt->range &&
	    ttm->pages[0] == hmm_device_entry_to_page(gtt->range,
						      gtt->range->pfns[0]))
		WARN_ONCE(1, "Missing get_user_page_done\n");
#endif
}

int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				struct ttm_buffer_object *tbo,
				uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
		uint64_t page_idx = 1;

		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
				ttm->pages, gtt->ttm.dma_address, flags);
		if (r)
			goto gart_bind_fail;

		/* Patch mtype of the second part BO */
		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

		r = amdgpu_gart_bind(adev,
				gtt->offset + (page_idx << PAGE_SHIFT),
				ttm->num_pages - page_idx,
				&ttm->pages[page_idx],
				&(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				     ttm->pages, gtt->ttm.dma_address, flags);
	}

gart_bind_fail:
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
}

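/**
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the graphics address space.
 */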
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void*)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
		ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}

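/**
 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible via GART
 */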
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t addr, flags;
	int r;

	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET) {
		bo->mem.start = addr >> PAGE_SHIFT;
	} else {

		/* allocate GART space */
		tmp = bo->mem;
		tmp.mm_node = NULL;
		placement.num_placement = 1;
		placement.placement = &placements;
		placement.num_busy_placement = 1;
		placement.busy_placement = &placements;
		placements.fpfn = 0;
		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
			TTM_PL_FLAG_TT;

		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
		if (unlikely(r))
			return r;

		/* compute PTE flags for this buffer object */
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);

		/* Bind pages */
		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
		r = amdgpu_ttm_gart_bind(adev, bo, flags);
		if (unlikely(r)) {
			ttm_bo_mem_put(bo, &tmp);
			return r;
		}

		ttm_bo_mem_put(bo, &bo->mem);
		bo->mem = tmp;
	}

	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}

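/**
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */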
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}

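/**
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */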
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

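/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 *
 * Called by ttm_tt_create().
 */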
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_ttm_adev(bo->bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

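/**
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */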
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	/* fall back to generic helper to populate the page array
	 * and map them to the device
	 */
	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

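/**
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */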
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	/* fall back to generic helper to unmap and unpopulate array */
	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

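/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @ttm: The ttm_tt object to bind this userptr object to
 * @addr: The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
 * to current task
 */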
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

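/**
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */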
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

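/**
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
 * address range for the current task.
 */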
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	return true;
}

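/**
 * amdgpu_ttm_tt_is_userptr - Have the pages backing by userptr?
 */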
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

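/**
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */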
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

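/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */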
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	return flags;
}

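/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */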
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

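/**
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects
 * until it can find space for a new object and by ttm_bo_force_list_clean()
 * which is used to clean out a memory space.
 */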
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct dma_resv_list *flist;
	struct dma_fence *f;
	int i;

	/* Don't evict VM page tables while they are busy, otherwise we can't
	 * grab the delayed destroy BOs
	 */
	if (bo->type == ttm_bo_type_kernel &&
	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = dma_resv_get_list(bo->base.resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				dma_resv_held(bo->base.resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}

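/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo:  The buffer object to read/write
 * @offset:  Offset into buffer object
 * @buf:  Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write:  true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */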
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
	.del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
};

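/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */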
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}

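/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */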
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_param bp;
	int r = 0;
	int i;
	u64 vram_size = adev->gmc.visible_vram_size;
	u64 offset = adev->fw_vram_usage.start_offset;
	u64 size = adev->fw_vram_usage.size;
	struct amdgpu_bo *bo;

	memset(&bp, 0, sizeof(bp));
	bp.size = adev->fw_vram_usage.size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size > 0 &&
	    adev->fw_vram_usage.size <= vram_size) {

		r = amdgpu_bo_create(adev, &bp,
				     &adev->fw_vram_usage.reserved_bo);
		if (r)
			goto error_create;

		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
		if (r)
			goto error_reserve;

		/* remove the original mem node and create a new one at the
		 * request position
		 */
		bo = adev->fw_vram_usage.reserved_bo;
		offset = ALIGN(offset, PAGE_SIZE);
		for (i = 0; i < bo->placement.num_placement; ++i) {
			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
		}

		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
		r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
				     &bo->tbo.mem, &ctx);
		if (r)
			goto error_pin;

		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
			AMDGPU_GEM_DOMAIN_VRAM,
			adev->fw_vram_usage.start_offset,
			(adev->fw_vram_usage.start_offset +
			adev->fw_vram_usage.size));
		if (r)
			goto error_pin;
		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
			&adev->fw_vram_usage.va);
		if (r)
			goto error_kmap;

		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
	}
	return r;

error_kmap:
	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;
	return r;
}

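/**
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */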
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;
	void *stolen_vga_buf;

	mutex_init(&adev->mman.gtt_window_lock);

	/* No others user of address space so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       dma_addressing_limited(adev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system pages allocations */
	adev->mman.bdev.no_retry = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);
#endif

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r) {
		return r;
	}

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.
	 */
	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, &stolen_vga_buf);
	if (r)
		return r;
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on 3/4th the size of RAM size
	 * or whatever the user passed on module init
	 */
	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	}
	else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;

	/* Initialize GTT memory pool */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	/* Initialize various on-chip memory pools */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
			   adev->gds.gds_size);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
			   adev->gds.gws_size);
	if (r) {
		DRM_ERROR("Failed initializing gws heap.\n");
		return r;
	}

	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
			   adev->gds.oa_size);
	if (r) {
		DRM_ERROR("Failed initializing oa heap.\n");
		return r;
	}

	/* Register debugfs entries for amdgpu_ttm */
	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

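/**
 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
 */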
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
	void *stolen_vga_buf;
	/* return the VGA stolen memory (if any) back to VRAM */
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
}

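/**
 * amdgpu_ttm_fini - De-initialize the TTM memory pools
 */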
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

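/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */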
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;
	int r;

	if (!adev->mman.initialized || adev->in_gpu_reset ||
	    adev->mman.buffer_funcs_enabled == enable)
		return;

	if (enable) {
		struct amdgpu_ring *ring;
		struct drm_sched_rq *rq;

		ring = adev->mman.buffer_funcs_ring;
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			return;
		}
	} else {
		drm_sched_entity_destroy(&adev->mman.entity);
		dma_fence_put(man->move);
		man->move = NULL;
	}

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct amdgpu_device *adev = file_priv->minor->dev->dev_private;

	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = adev->mman.buffer_funcs->copy_num_dw;
	while (num_dw & 0x7)
		num_dw++;

	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->sched.ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (vm_needs_flush) {
		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
		job->vm_needs_flush = true;
	}
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit)
		r = amdgpu_job_submit_direct(job, ring, fence);
	else
		r = amdgpu_job_submit(job, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return r;

error_free:
	amdgpu_job_free(job);
	DRM_ERROR("Error scheduling IBs (%d)\n", r);
	return r;
}

int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
							   max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = (uintptr_t)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

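/**
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */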
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

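/**
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */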
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

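/**
 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
 */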
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

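/**
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */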
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		/* Translate the bus address to a physical address.  If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

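/**
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */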
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}