/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

struct amdgpu_vram_reservation {
	struct list_head node;
	struct drm_mm_node mm_node;
};

static inline struct amdgpu_vram_mgr *
to_vram_mgr(struct ttm_resource_manager *man)
{
	return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *
to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}
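
/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total VRAM
 * available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */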
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}
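
/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total
 * visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */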
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}
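
/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting how much VRAM is
 * currently in use.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */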
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
}
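
/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting how much visible
 * VRAM is currently in use.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */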
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
						  struct device_attribute *attr,
						  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
}
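
/**
 * DOC: mem_info_vram_vendor
 *
 * The amdgpu driver provides a sysfs API for reporting the vendor of the
 * installed VRAM.
 * The file mem_info_vram_vendor is used for this and returns the name of
 * the vendor.
 */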
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	switch (adev->gmc.vram_vendor) {
	case SAMSUNG:
		return sysfs_emit(buf, "samsung\n");
	case INFINEON:
		return sysfs_emit(buf, "infineon\n");
	case ELPIDA:
		return sysfs_emit(buf, "elpida\n");
	case ETRON:
		return sysfs_emit(buf, "etron\n");
	case NANYA:
		return sysfs_emit(buf, "nanya\n");
	case HYNIX:
		return sysfs_emit(buf, "hynix\n");
	case MOSEL:
		return sysfs_emit(buf, "mosel\n");
	case WINBOND:
		return sysfs_emit(buf, "winbond\n");
	case ESMT:
		return sysfs_emit(buf, "esmt\n");
	case MICRON:
		return sysfs_emit(buf, "micron\n");
	default:
		return sysfs_emit(buf, "unknown\n");
	}
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
		   amdgpu_mem_info_vram_vendor, NULL);

static struct attribute *amdgpu_vram_mgr_attributes[] = {
	&dev_attr_mem_info_vram_total.attr,
	&dev_attr_mem_info_vis_vram_total.attr,
	&dev_attr_mem_info_vram_used.attr,
	&dev_attr_mem_info_vis_vram_used.attr,
	&dev_attr_mem_info_vram_vendor.attr,
	NULL
};

const struct attribute_group amdgpu_vram_mgr_attr_group = {
	.attrs = amdgpu_vram_mgr_attributes
};
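
/**
 * amdgpu_vram_mgr_vis_size - Calculate visible node size
 *
 * @adev: amdgpu_device pointer
 * @node: MM node structure
 *
 * Calculate how many bytes of the MM node are inside visible VRAM
 */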
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_mm_node *node)
{
	uint64_t start = node->start << PAGE_SHIFT;
	uint64_t end = (node->size + node->start) << PAGE_SHIFT;

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}
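
/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */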
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *res = bo->tbo.resource;
	unsigned pages = res->num_pages;
	struct drm_mm_node *mm;
	u64 usage;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);

	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
	for (usage = 0; pages; pages -= mm->size, mm++)
		usage += amdgpu_vram_mgr_vis_size(adev, mm);

	return usage;
}
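
/*
 * Try to insert the pending reservations into the drm_mm address space and
 * move those that succeed to the reserved_pages list. Called with mgr->lock
 * held.
 */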
static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv, *temp;
	uint64_t vis_usage;

	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
		if (drm_mm_reserve_node(mm, &rsv->mm_node))
			continue;

		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
			rsv->mm_node.start, rsv->mm_node.size);

		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
		atomic64_add(vis_usage, &mgr->vis_usage);
		atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
		list_move(&rsv->node, &mgr->reserved_pages);
	}
}
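
/**
 * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
 *
 * @man: TTM memory type manager
 * @start: start address of the range in VRAM
 * @size: size of the range
 *
 * Reserve memory from start address with the specified size in VRAM
 */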
int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
				  uint64_t start, uint64_t size)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_vram_reservation *rsv;

	rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
	if (!rsv)
		return -ENOMEM;

	INIT_LIST_HEAD(&rsv->node);
	rsv->mm_node.start = start >> PAGE_SHIFT;
	rsv->mm_node.size = size >> PAGE_SHIFT;

	spin_lock(&mgr->lock);
	list_add_tail(&rsv->node, &mgr->reservations_pending);
	amdgpu_vram_mgr_do_reserve(man);
	spin_unlock(&mgr->lock);

	return 0;
}
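
/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status
 *
 * @man: TTM memory type manager
 * @start: start address of a page in VRAM
 *
 * Returns:
 *	-EBUSY: the page is still held in the pending list
 *	0: the page has been reserved
 *	-ENOENT: the input page is not a reservation
 */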
int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
				      uint64_t start)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_vram_reservation *rsv;
	int ret;

	spin_lock(&mgr->lock);

	list_for_each_entry(rsv, &mgr->reservations_pending, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
			ret = -EBUSY;
			goto out;
		}
	}

	list_for_each_entry(rsv, &mgr->reserved_pages, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
			ret = 0;
			goto out;
		}
	}

	ret = -ENOENT;
out:
	spin_unlock(&mgr->lock);
	return ret;
}
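
/**
 * amdgpu_vram_mgr_virt_start - update virtual start address
 *
 * @mem: ttm_resource to update
 * @node: just allocated node
 *
 * Calculate a virtual BO start address to easily check if everything is CPU
 * accessible.
 */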
static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
				       struct drm_mm_node *node)
{
	unsigned long start;

	start = node->start + node->size;
	if (start > mem->num_pages)
		start -= mem->num_pages;
	else
		start = 0;
	mem->start = max(mem->start, start);
}
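
/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @res: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */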
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource **res)
{
	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	uint64_t vis_usage = 0, mem_bytes, max_bytes;
	struct ttm_range_mgr_node *node;
	struct drm_mm *mm = &mgr->mm;
	enum drm_mm_insert_mode mode;
	unsigned i;
	int r;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	max_bytes = adev->gmc.mc_vram_size;
	if (tbo->type != ttm_bo_type_kernel)
		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
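
	/* bail out quickly if there's likely not enough VRAM for this BO */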
	mem_bytes = tbo->base.size;
	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
		r = -ENOSPC;
		goto error_sub;
	}

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_node = HPAGE_PMD_NR;
#else
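		/* default to 2MB */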
		pages_per_node = 2UL << (20UL - PAGE_SHIFT);
#endif
		pages_per_node = max_t(uint32_t, pages_per_node,
				       tbo->page_alignment);
		num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
	}

	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
			GFP_KERNEL | __GFP_ZERO);
	if (!node) {
		r = -ENOMEM;
		goto error_sub;
	}

	ttm_resource_init(tbo, place, &node->base);

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	pages_left = node->base.num_pages;
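
	/* Limit maximum size to 2GB due to SG table limitations */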
	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));

	i = 0;
	spin_lock(&mgr->lock);
	while (pages_left) {
		uint32_t alignment = tbo->page_alignment;

		if (pages >= pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
						alignment, 0, place->fpfn,
						lpfn, mode);
		if (unlikely(r)) {
			if (pages > pages_per_node) {
				if (is_power_of_2(pages))
					pages = pages / 2;
				else
					pages = rounddown_pow_of_two(pages);
				continue;
			}
			goto error_free;
		}

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
		pages_left -= pages;
		++i;

		if (pages > pages_left)
			pages = pages_left;
	}
	spin_unlock(&mgr->lock);

	if (i == 1)
		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

	if (adev->gmc.xgmi.connected_to_cpu)
		node->base.bus.caching = ttm_cached;
	else
		node->base.bus.caching = ttm_write_combined;

	atomic64_add(vis_usage, &mgr->vis_usage);
	*res = &node->base;
	return 0;

error_free:
	while (i--)
		drm_mm_remove_node(&node->mm_nodes[i]);
	spin_unlock(&mgr->lock);
	kvfree(node);

error_sub:
	atomic64_sub(mem_bytes, &mgr->usage);
	return r;
}
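
/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @res: TTM memory object
 *
 * Free the allocated VRAM again.
 */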
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *res)
{
	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	uint64_t usage = 0, vis_usage = 0;
	unsigned i, pages;

	spin_lock(&mgr->lock);
	for (i = 0, pages = res->num_pages; pages;
	     pages -= node->mm_nodes[i].size, ++i) {
		struct drm_mm_node *mm = &node->mm_nodes[i];

		drm_mm_remove_node(mm);
		usage += mm->size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
	}
	amdgpu_vram_mgr_do_reserve(man);
	spin_unlock(&mgr->lock);

	atomic64_sub(usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	kvfree(node);
}
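
/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @res: TTM memory object
 * @offset: byte offset from the base of VRAM BO
 * @length: number of bytes to export in sg_table
 * @dev: the other device
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */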
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *res,
			      u64 offset, u64 length,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct amdgpu_res_cursor cursor;
	struct scatterlist *sg;
	int num_entries = 0;
	int i, r;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;
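
	/* Determine the number of DRM_MM nodes to export */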
	amdgpu_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		num_entries++;
		amdgpu_res_next(&cursor, cursor.size);
	}

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;
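
	/* Initialize scatterlist nodes of sg_table */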
	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;
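
	/*
	 * Walk down DRM_MM nodes to populate the scatterlist: use the
	 * resource cursor to visit each contiguous VRAM range, DMA-map its
	 * bus address and fill in one sg entry per range.
	 */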
	amdgpu_res_first(res, offset, length, &cursor);
	for_each_sgtable_sg((*sgt), sg, i) {
		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
		size_t size = cursor.size;
		dma_addr_t addr;

		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return 0;

error_unmap:
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}
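
/**
 * amdgpu_vram_mgr_free_sgt - free a previously allocated sg table
 *
 * @dev: device pointer
 * @dir: data direction of resource to unmap
 * @sgt: sg table to free
 *
 * Unmap the DMA mappings and free the sg table allocated by
 * amdgpu_vram_mgr_alloc_sgt().
 */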
void amdgpu_vram_mgr_free_sgt(struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
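
/**
 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in this domain.
 */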
uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	return atomic64_read(&mgr->usage);
}
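
/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in the visible part of VRAM
 */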
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	return atomic64_read(&mgr->vis_usage);
}
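
/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using printk.
 */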
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
		   man->size, amdgpu_vram_mgr_usage(man) >> 20,
		   amdgpu_vram_mgr_vis_usage(man) >> 20);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
	.alloc	= amdgpu_vram_mgr_new,
	.free	= amdgpu_vram_mgr_del,
	.debug	= amdgpu_vram_mgr_debug
};
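
/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */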
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;

	ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);

	man->func = &amdgpu_vram_mgr_func;

	drm_mm_init(&mgr->mm, 0, man->size);
	spin_lock_init(&mgr->lock);
	INIT_LIST_HEAD(&mgr->reservations_pending);
	INIT_LIST_HEAD(&mgr->reserved_pages);

	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}
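
/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the VRAM manager after evicting everything from the
 * domain, releasing all remaining reservations.
 */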
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;
	struct amdgpu_vram_reservation *rsv, *temp;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	if (ret)
		return;

	spin_lock(&mgr->lock);
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
		kfree(rsv);

	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
		drm_mm_remove_node(&rsv->mm_node);
		kfree(rsv);
	}
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);

	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}