#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/hash.h>
#include <linux/cpufreq.h>
#include <linux/log2.h>
#include <linux/dmi.h>
#include <linux/atomic.h>

#include "kfd_priv.h"
#include "kfd_crat.h"
#include "kfd_topology.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

static struct list_head topology_device_list;
static struct kfd_system_properties sys_props;

static DECLARE_RWSEM(topology_lock);
static atomic_t topology_crat_proximity_domain;

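/* Look up a topology device by its CRAT proximity domain. Returns the
 * matching device or NULL; the topology read lock is taken internally.
 */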
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain)
{
	struct kfd_topology_device *top_dev;
	struct kfd_topology_device *device = NULL;

	down_read(&topology_lock);

	list_for_each_entry(top_dev, &topology_device_list, list)
		if (top_dev->proximity_domain == proximity_domain) {
			device = top_dev;
			break;
		}

	up_read(&topology_lock);

	return device;
}

struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
{
	struct kfd_topology_device *top_dev = NULL;
	struct kfd_topology_device *ret = NULL;

	down_read(&topology_lock);

	list_for_each_entry(top_dev, &topology_device_list, list)
		if (top_dev->gpu_id == gpu_id) {
			ret = top_dev;
			break;
		}

	up_read(&topology_lock);

	return ret;
}

struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
{
	struct kfd_topology_device *top_dev;

	top_dev = kfd_topology_device_by_id(gpu_id);
	if (!top_dev)
		return NULL;

	return top_dev->gpu;
}

struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
{
	struct kfd_topology_device *top_dev;
	struct kfd_dev *device = NULL;

	down_read(&topology_lock);

	list_for_each_entry(top_dev, &topology_device_list, list)
		if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
			device = top_dev->gpu;
			break;
		}

	up_read(&topology_lock);

	return device;
}

struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
{
	struct kfd_topology_device *top_dev;
	struct kfd_dev *device = NULL;

	down_read(&topology_lock);

	list_for_each_entry(top_dev, &topology_device_list, list)
		if (top_dev->gpu && top_dev->gpu->kgd == kgd) {
			device = top_dev->gpu;
			break;
		}

	up_read(&topology_lock);

	return device;
}

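/* Unlink a topology device from its list and free all of its memory,
 * cache, io_link and perf property entries along with the device itself.
 */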
static void kfd_release_topology_device(struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *mem;
	struct kfd_cache_properties *cache;
	struct kfd_iolink_properties *iolink;
	struct kfd_perf_properties *perf;

	list_del(&dev->list);

	while (dev->mem_props.next != &dev->mem_props) {
		mem = container_of(dev->mem_props.next,
				struct kfd_mem_properties, list);
		list_del(&mem->list);
		kfree(mem);
	}

	while (dev->cache_props.next != &dev->cache_props) {
		cache = container_of(dev->cache_props.next,
				struct kfd_cache_properties, list);
		list_del(&cache->list);
		kfree(cache);
	}

	while (dev->io_link_props.next != &dev->io_link_props) {
		iolink = container_of(dev->io_link_props.next,
				struct kfd_iolink_properties, list);
		list_del(&iolink->list);
		kfree(iolink);
	}

	while (dev->perf_props.next != &dev->perf_props) {
		perf = container_of(dev->perf_props.next,
				struct kfd_perf_properties, list);
		list_del(&perf->list);
		kfree(perf);
	}

	kfree(dev);
}

void kfd_release_topology_device_list(struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	while (!list_empty(device_list)) {
		dev = list_first_entry(device_list,
				struct kfd_topology_device, list);
		kfd_release_topology_device(dev);
	}
}

static void kfd_release_live_view(void)
{
	kfd_release_topology_device_list(&topology_device_list);
	memset(&sys_props, 0, sizeof(sys_props));
}

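/* Allocate a new topology device, initialize its property lists and
 * append it to @device_list. Returns NULL on allocation failure.
 */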
struct kfd_topology_device *kfd_create_topology_device(
				struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	dev = kfd_alloc_struct(dev);
	if (!dev) {
		pr_err("No memory to allocate a topology device");
		return NULL;
	}

	INIT_LIST_HEAD(&dev->mem_props);
	INIT_LIST_HEAD(&dev->cache_props);
	INIT_LIST_HEAD(&dev->io_link_props);
	INIT_LIST_HEAD(&dev->perf_props);

	list_add_tail(&dev->list, device_list);

	return dev;
}

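/* Helper macros for the sysfs show callbacks below. Each invocation
 * re-prints the current buffer contents followed by the new field, so
 * successive calls append "name value" lines up to PAGE_SIZE.
 */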
#define sysfs_show_gen_prop(buffer, fmt, ...) \
		snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
#define sysfs_show_32bit_prop(buffer, name, value) \
		sysfs_show_gen_prop(buffer, "%s %u\n", name, value)
#define sysfs_show_64bit_prop(buffer, name, value) \
		sysfs_show_gen_prop(buffer, "%s %llu\n", name, value)
#define sysfs_show_32bit_val(buffer, value) \
		sysfs_show_gen_prop(buffer, "%u\n", value)
#define sysfs_show_str_val(buffer, value) \
		sysfs_show_gen_prop(buffer, "%s\n", value)

static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	ssize_t ret;

	buffer[0] = 0;

	if (attr == &sys_props.attr_genid) {
		ret = sysfs_show_32bit_val(buffer, sys_props.generation_count);
	} else if (attr == &sys_props.attr_props) {
		sysfs_show_64bit_prop(buffer, "platform_oem",
				sys_props.platform_oem);
		sysfs_show_64bit_prop(buffer, "platform_id",
				sys_props.platform_id);
		ret = sysfs_show_64bit_prop(buffer, "platform_rev",
				sys_props.platform_rev);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static void kfd_topology_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops sysprops_ops = {
	.show = sysprops_show,
};

static struct kobj_type sysprops_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &sysprops_ops,
};

static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
			   char *buffer)
{
	ssize_t ret;
	struct kfd_iolink_properties *iolink;

	buffer[0] = 0;

	iolink = container_of(attr, struct kfd_iolink_properties, attr);
	sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
	sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
	sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
	sysfs_show_32bit_prop(buffer, "node_from", iolink->node_from);
	sysfs_show_32bit_prop(buffer, "node_to", iolink->node_to);
	sysfs_show_32bit_prop(buffer, "weight", iolink->weight);
	sysfs_show_32bit_prop(buffer, "min_latency", iolink->min_latency);
	sysfs_show_32bit_prop(buffer, "max_latency", iolink->max_latency);
	sysfs_show_32bit_prop(buffer, "min_bandwidth", iolink->min_bandwidth);
	sysfs_show_32bit_prop(buffer, "max_bandwidth", iolink->max_bandwidth);
	sysfs_show_32bit_prop(buffer, "recommended_transfer_size",
			iolink->rec_transfer_size);
	ret = sysfs_show_32bit_prop(buffer, "flags", iolink->flags);

	return ret;
}

static const struct sysfs_ops iolink_ops = {
	.show = iolink_show,
};

static struct kobj_type iolink_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &iolink_ops,
};

static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
			char *buffer)
{
	ssize_t ret;
	struct kfd_mem_properties *mem;

	buffer[0] = 0;

	mem = container_of(attr, struct kfd_mem_properties, attr);
	sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
	sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
	sysfs_show_32bit_prop(buffer, "flags", mem->flags);
	sysfs_show_32bit_prop(buffer, "width", mem->width);
	ret = sysfs_show_32bit_prop(buffer, "mem_clk_max", mem->mem_clk_max);

	return ret;
}

static const struct sysfs_ops mem_ops = {
	.show = mem_show,
};

static struct kobj_type mem_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &mem_ops,
};

static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
			      char *buffer)
{
	ssize_t ret;
	uint32_t i, j;
	struct kfd_cache_properties *cache;

	buffer[0] = 0;

	cache = container_of(attr, struct kfd_cache_properties, attr);
	sysfs_show_32bit_prop(buffer, "processor_id_low",
			cache->processor_id_low);
	sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
	sysfs_show_32bit_prop(buffer, "size", cache->cache_size);
	sysfs_show_32bit_prop(buffer, "cache_line_size", cache->cacheline_size);
	sysfs_show_32bit_prop(buffer, "cache_lines_per_tag",
			cache->cachelines_per_tag);
	sysfs_show_32bit_prop(buffer, "association", cache->cache_assoc);
	sysfs_show_32bit_prop(buffer, "latency", cache->cache_latency);
	sysfs_show_32bit_prop(buffer, "type", cache->cache_type);
	snprintf(buffer, PAGE_SIZE, "%ssibling_map ", buffer);
	for (i = 0; i < CRAT_SIBLINGMAP_SIZE; i++)
		for (j = 0; j < sizeof(cache->sibling_map[0])*8; j++) {
			/* Emit one comma-separated bit per sibling CPU */
			if (cache->sibling_map[i] & (1 << j))
				ret = snprintf(buffer, PAGE_SIZE,
						"%s%d%s", buffer, 1, ",");
			else
				ret = snprintf(buffer, PAGE_SIZE,
						"%s%d%s", buffer, 0, ",");
		}
	/* Replace the trailing "," with an end of line */
	*(buffer + strlen(buffer) - 1) = 0xA;
	return ret;
}

static const struct sysfs_ops cache_ops = {
	.show = kfd_cache_show,
};

static struct kobj_type cache_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &cache_ops,
};

/****** Sysfs of Performance Counters ******/

struct kfd_perf_attr {
	struct kobj_attribute attr;
	uint32_t data;
};

static ssize_t perf_show(struct kobject *kobj, struct kobj_attribute *attrs,
			 char *buf)
{
	struct kfd_perf_attr *attr;

	buf[0] = 0;
	attr = container_of(attrs, struct kfd_perf_attr, attr);
	if (!attr->data)
		return 0;
	else
		return sysfs_show_32bit_val(buf, attr->data);
}

#define KFD_PERF_DESC(_name, _data)			\
{							\
	.attr = __ATTR(_name, 0444, perf_show, NULL),	\
	.data = _data,					\
}

static struct kfd_perf_attr perf_attr_iommu[] = {
	KFD_PERF_DESC(max_concurrent, 0),
	KFD_PERF_DESC(num_counters, 0),
	KFD_PERF_DESC(counter_ids, 0),
};

static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
			 char *buffer)
{
	struct kfd_topology_device *dev;
	char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
	uint32_t i;
	uint32_t log_max_watch_addr;

	buffer[0] = 0;

	if (strcmp(attr->name, "gpu_id") == 0) {
		dev = container_of(attr, struct kfd_topology_device,
				attr_gpuid);
		return sysfs_show_32bit_val(buffer, dev->gpu_id);
	}

	if (strcmp(attr->name, "name") == 0) {
		dev = container_of(attr, struct kfd_topology_device,
				attr_name);
		for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
			public_name[i] =
					(char)dev->node_props.marketing_name[i];
			if (dev->node_props.marketing_name[i] == 0)
				break;
		}
		public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
		return sysfs_show_str_val(buffer, public_name);
	}

	dev = container_of(attr, struct kfd_topology_device,
			attr_props);
	sysfs_show_32bit_prop(buffer, "cpu_cores_count",
			dev->node_props.cpu_cores_count);
	sysfs_show_32bit_prop(buffer, "simd_count",
			dev->node_props.simd_count);
	sysfs_show_32bit_prop(buffer, "mem_banks_count",
			dev->node_props.mem_banks_count);
	sysfs_show_32bit_prop(buffer, "caches_count",
			dev->node_props.caches_count);
	sysfs_show_32bit_prop(buffer, "io_links_count",
			dev->node_props.io_links_count);
	sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
			dev->node_props.cpu_core_id_base);
	sysfs_show_32bit_prop(buffer, "simd_id_base",
			dev->node_props.simd_id_base);
	sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
			dev->node_props.max_waves_per_simd);
	sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
			dev->node_props.lds_size_in_kb);
	sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
			dev->node_props.gds_size_in_kb);
	sysfs_show_32bit_prop(buffer, "wave_front_size",
			dev->node_props.wave_front_size);
	sysfs_show_32bit_prop(buffer, "array_count",
			dev->node_props.array_count);
	sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
			dev->node_props.simd_arrays_per_engine);
	sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
			dev->node_props.cu_per_simd_array);
	sysfs_show_32bit_prop(buffer, "simd_per_cu",
			dev->node_props.simd_per_cu);
	sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
			dev->node_props.max_slots_scratch_cu);
	sysfs_show_32bit_prop(buffer, "vendor_id",
			dev->node_props.vendor_id);
	sysfs_show_32bit_prop(buffer, "device_id",
			dev->node_props.device_id);
	sysfs_show_32bit_prop(buffer, "location_id",
			dev->node_props.location_id);
	sysfs_show_32bit_prop(buffer, "drm_render_minor",
			dev->node_props.drm_render_minor);
	sysfs_show_64bit_prop(buffer, "hive_id",
			dev->node_props.hive_id);

	if (dev->gpu) {
		log_max_watch_addr =
			__ilog2_u32(dev->gpu->device_info->num_of_watch_points);

		if (log_max_watch_addr) {
			dev->node_props.capability |=
					HSA_CAP_WATCH_POINTS_SUPPORTED;

			dev->node_props.capability |=
				((log_max_watch_addr <<
					HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT) &
				HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
		}

		if (dev->gpu->device_info->asic_family == CHIP_TONGA)
			dev->node_props.capability |=
					HSA_CAP_AQL_QUEUE_DOUBLE_MAP;

		sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
			dev->node_props.max_engine_clk_fcompute);

		sysfs_show_64bit_prop(buffer, "local_mem_size",
				(unsigned long long int) 0);

		sysfs_show_32bit_prop(buffer, "fw_version",
				dev->gpu->mec_fw_version);
		sysfs_show_32bit_prop(buffer, "capability",
				dev->node_props.capability);
		sysfs_show_32bit_prop(buffer, "sdma_fw_version",
				dev->gpu->sdma_fw_version);
	}

	return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
					cpufreq_quick_get_max(0)/1000);
}

static const struct sysfs_ops node_ops = {
	.show = node_show,
};

static struct kobj_type node_type = {
	.release = kfd_topology_kobj_release,
	.sysfs_ops = &node_ops,
};

static void kfd_remove_sysfs_file(struct kobject *kobj, struct attribute *attr)
{
	sysfs_remove_file(kobj, attr);
	kobject_del(kobj);
	kobject_put(kobj);
}

static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
{
	struct kfd_iolink_properties *iolink;
	struct kfd_cache_properties *cache;
	struct kfd_mem_properties *mem;
	struct kfd_perf_properties *perf;

	if (dev->kobj_iolink) {
		list_for_each_entry(iolink, &dev->io_link_props, list)
			if (iolink->kobj) {
				kfd_remove_sysfs_file(iolink->kobj,
							&iolink->attr);
				iolink->kobj = NULL;
			}
		kobject_del(dev->kobj_iolink);
		kobject_put(dev->kobj_iolink);
		dev->kobj_iolink = NULL;
	}

	if (dev->kobj_cache) {
		list_for_each_entry(cache, &dev->cache_props, list)
			if (cache->kobj) {
				kfd_remove_sysfs_file(cache->kobj,
							&cache->attr);
				cache->kobj = NULL;
			}
		kobject_del(dev->kobj_cache);
		kobject_put(dev->kobj_cache);
		dev->kobj_cache = NULL;
	}

	if (dev->kobj_mem) {
		list_for_each_entry(mem, &dev->mem_props, list)
			if (mem->kobj) {
				kfd_remove_sysfs_file(mem->kobj, &mem->attr);
				mem->kobj = NULL;
			}
		kobject_del(dev->kobj_mem);
		kobject_put(dev->kobj_mem);
		dev->kobj_mem = NULL;
	}

	if (dev->kobj_perf) {
		list_for_each_entry(perf, &dev->perf_props, list) {
			kfree(perf->attr_group);
			perf->attr_group = NULL;
		}
		kobject_del(dev->kobj_perf);
		kobject_put(dev->kobj_perf);
		dev->kobj_perf = NULL;
	}

	if (dev->kobj_node) {
		sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
		sysfs_remove_file(dev->kobj_node, &dev->attr_name);
		sysfs_remove_file(dev->kobj_node, &dev->attr_props);
		kobject_del(dev->kobj_node);
		kobject_put(dev->kobj_node);
		dev->kobj_node = NULL;
	}
}

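/* Build the sysfs entry for one topology node: create the node kobject
 * and its mem_banks/caches/io_links/perf sub-directories, then expose a
 * "properties" file for every memory bank, cache and IO link, and an
 * attribute group per performance block. Returns 0 on success or a
 * negative errno.
 */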
static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
		uint32_t id)
{
	struct kfd_iolink_properties *iolink;
	struct kfd_cache_properties *cache;
	struct kfd_mem_properties *mem;
	struct kfd_perf_properties *perf;
	int ret;
	uint32_t i, num_attrs;
	struct attribute **attrs;

	if (WARN_ON(dev->kobj_node))
		return -EEXIST;

	/*
	 * Creating the sysfs folders
	 */
	dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
	if (!dev->kobj_node)
		return -ENOMEM;

	ret = kobject_init_and_add(dev->kobj_node, &node_type,
			sys_props.kobj_nodes, "%d", id);
	if (ret < 0)
		return ret;

	dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
	if (!dev->kobj_mem)
		return -ENOMEM;

	dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node);
	if (!dev->kobj_cache)
		return -ENOMEM;

	dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node);
	if (!dev->kobj_iolink)
		return -ENOMEM;

	dev->kobj_perf = kobject_create_and_add("perf", dev->kobj_node);
	if (!dev->kobj_perf)
		return -ENOMEM;

	/*
	 * Creating sysfs files for node properties
	 */
	dev->attr_gpuid.name = "gpu_id";
	dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(&dev->attr_gpuid);
	dev->attr_name.name = "name";
	dev->attr_name.mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(&dev->attr_name);
	dev->attr_props.name = "properties";
	dev->attr_props.mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(&dev->attr_props);
	ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid);
	if (ret < 0)
		return ret;
	ret = sysfs_create_file(dev->kobj_node, &dev->attr_name);
	if (ret < 0)
		return ret;
	ret = sysfs_create_file(dev->kobj_node, &dev->attr_props);
	if (ret < 0)
		return ret;

	i = 0;
	list_for_each_entry(mem, &dev->mem_props, list) {
		mem->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
		if (!mem->kobj)
			return -ENOMEM;
		ret = kobject_init_and_add(mem->kobj, &mem_type,
				dev->kobj_mem, "%d", i);
		if (ret < 0)
			return ret;

		mem->attr.name = "properties";
		mem->attr.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&mem->attr);
		ret = sysfs_create_file(mem->kobj, &mem->attr);
		if (ret < 0)
			return ret;
		i++;
	}

	i = 0;
	list_for_each_entry(cache, &dev->cache_props, list) {
		cache->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
		if (!cache->kobj)
			return -ENOMEM;
		ret = kobject_init_and_add(cache->kobj, &cache_type,
				dev->kobj_cache, "%d", i);
		if (ret < 0)
			return ret;

		cache->attr.name = "properties";
		cache->attr.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&cache->attr);
		ret = sysfs_create_file(cache->kobj, &cache->attr);
		if (ret < 0)
			return ret;
		i++;
	}

	i = 0;
	list_for_each_entry(iolink, &dev->io_link_props, list) {
		iolink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
		if (!iolink->kobj)
			return -ENOMEM;
		ret = kobject_init_and_add(iolink->kobj, &iolink_type,
				dev->kobj_iolink, "%d", i);
		if (ret < 0)
			return ret;

		iolink->attr.name = "properties";
		iolink->attr.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&iolink->attr);
		ret = sysfs_create_file(iolink->kobj, &iolink->attr);
		if (ret < 0)
			return ret;
		i++;
	}

	/* All hardware blocks have the same number of attributes. */
	num_attrs = ARRAY_SIZE(perf_attr_iommu);
	list_for_each_entry(perf, &dev->perf_props, list) {
		perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
			* num_attrs + sizeof(struct attribute_group),
			GFP_KERNEL);
		if (!perf->attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(perf->attr_group + 1);
		if (!strcmp(perf->block_name, "iommu")) {
			/* The IOMMU perf counter data (max_concurrent etc.)
			 * was filled in by kfd_iommu_add_perf_counters().
			 */
			perf_attr_iommu[0].data = perf->max_concurrent;
			for (i = 0; i < num_attrs; i++)
				attrs[i] = &perf_attr_iommu[i].attr.attr;
		}
		perf->attr_group->name = perf->block_name;
		perf->attr_group->attrs = attrs;
		ret = sysfs_create_group(dev->kobj_perf, perf->attr_group);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Called with write topology lock acquired */
static int kfd_build_sysfs_node_tree(void)
{
	struct kfd_topology_device *dev;
	int ret;
	uint32_t i = 0;

	list_for_each_entry(dev, &topology_device_list, list) {
		ret = kfd_build_sysfs_node_entry(dev, i);
		if (ret < 0)
			return ret;
		i++;
	}

	return 0;
}

/* Called with write topology lock acquired */
static void kfd_remove_sysfs_node_tree(void)
{
	struct kfd_topology_device *dev;

	list_for_each_entry(dev, &topology_device_list, list)
		kfd_remove_sysfs_node_entry(dev);
}

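/* (Re)build the sysfs topology tree. On the first call this also creates
 * the top-level "topology" kobject, the "nodes" directory and the
 * generation_id/system_properties files under the kfd device kobject.
 * Callers must hold the topology write lock.
 */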
static int kfd_topology_update_sysfs(void)
{
	int ret;

	pr_info("Creating topology SYSFS entries\n");
	if (!sys_props.kobj_topology) {
		sys_props.kobj_topology =
				kfd_alloc_struct(sys_props.kobj_topology);
		if (!sys_props.kobj_topology)
			return -ENOMEM;

		ret = kobject_init_and_add(sys_props.kobj_topology,
				&sysprops_type, &kfd_device->kobj,
				"topology");
		if (ret < 0)
			return ret;

		sys_props.kobj_nodes = kobject_create_and_add("nodes",
				sys_props.kobj_topology);
		if (!sys_props.kobj_nodes)
			return -ENOMEM;

		sys_props.attr_genid.name = "generation_id";
		sys_props.attr_genid.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&sys_props.attr_genid);
		ret = sysfs_create_file(sys_props.kobj_topology,
				&sys_props.attr_genid);
		if (ret < 0)
			return ret;

		sys_props.attr_props.name = "system_properties";
		sys_props.attr_props.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&sys_props.attr_props);
		ret = sysfs_create_file(sys_props.kobj_topology,
				&sys_props.attr_props);
		if (ret < 0)
			return ret;
	}

	kfd_remove_sysfs_node_tree();

	return kfd_build_sysfs_node_tree();
}

static void kfd_topology_release_sysfs(void)
{
	kfd_remove_sysfs_node_tree();
	if (sys_props.kobj_topology) {
		sysfs_remove_file(sys_props.kobj_topology,
				&sys_props.attr_genid);
		sysfs_remove_file(sys_props.kobj_topology,
				&sys_props.attr_props);
		if (sys_props.kobj_nodes) {
			kobject_del(sys_props.kobj_nodes);
			kobject_put(sys_props.kobj_nodes);
			sys_props.kobj_nodes = NULL;
		}
		kobject_del(sys_props.kobj_topology);
		kobject_put(sys_props.kobj_topology);
		sys_props.kobj_topology = NULL;
	}
}

/* Called with write topology_lock acquired */
static void kfd_topology_update_device_list(struct list_head *temp_list,
					struct list_head *master_list)
{
	while (!list_empty(temp_list)) {
		list_move_tail(temp_list->next, master_list);
		sys_props.num_devices++;
	}
}

static void kfd_debug_print_topology(void)
{
	struct kfd_topology_device *dev;

	down_read(&topology_lock);

	dev = list_last_entry(&topology_device_list,
			struct kfd_topology_device, list);
	if (dev) {
		if (dev->node_props.cpu_cores_count &&
				dev->node_props.simd_count) {
			pr_info("Topology: Add APU node [0x%0x:0x%0x]\n",
				dev->node_props.device_id,
				dev->node_props.vendor_id);
		} else if (dev->node_props.cpu_cores_count)
			pr_info("Topology: Add CPU node\n");
		else if (dev->node_props.simd_count)
			pr_info("Topology: Add dGPU node [0x%0x:0x%0x]\n",
				dev->node_props.device_id,
				dev->node_props.vendor_id);
	}
	up_read(&topology_lock);
}

/* Helper function for initializing the platform_xx members of
 * kfd_system_properties. Uses OEM info from the last CPU/APU node.
 */
static void kfd_update_system_properties(void)
{
	struct kfd_topology_device *dev;

	down_read(&topology_lock);
	dev = list_last_entry(&topology_device_list,
			struct kfd_topology_device, list);
	if (dev) {
		sys_props.platform_id =
			(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
		sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
		sys_props.platform_rev = dev->oem_revision;
	}
	up_read(&topology_lock);
}

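/* dmi_walk() callback: for each DMI memory device entry, propagate the
 * reported data width and memory speed to every memory bank of the
 * (CPU-only) topology device passed in via @private.
 */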
static void find_system_memory(const struct dmi_header *dm,
	void *private)
{
	struct kfd_mem_properties *mem;
	u16 mem_width, mem_clock;
	struct kfd_topology_device *kdev =
		(struct kfd_topology_device *)private;
	const u8 *dmi_data = (const u8 *)(dm + 1);

	if (dm->type == DMI_ENTRY_MEM_DEVICE && dm->length >= 0x15) {
		mem_width = (u16)(*(const u16 *)(dmi_data + 0x6));
		mem_clock = (u16)(*(const u16 *)(dmi_data + 0x11));
		list_for_each_entry(mem, &kdev->mem_props, list) {
			if (mem_width != 0xFFFF && mem_width != 0)
				mem->width = mem_width;
			if (mem_clock != 0)
				mem->mem_clk_max = mem_clock;
		}
	}
}

/*
 * Performance counters information is not part of CRAT but we would like
 * to put them in the sysfs under topology directory for Thunk to get the
 * data. This function is called before updating the sysfs.
 */
static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev)
{
	/* These are the only counters supported so far */
	return kfd_iommu_add_perf_counters(kdev);
}

/* kfd_add_non_crat_information - Add information that is not currently
 *	defined in CRAT but is necessary for KFD topology
 * @kdev - topology device to which the additional info is added
 */
static void kfd_add_non_crat_information(struct kfd_topology_device *kdev)
{
	/* Check if CPU only node. */
	if (!kdev->gpu) {
		/* Add system memory information */
		dmi_walk(find_system_memory, kdev);
	}
	/* TODO: For GPU node, rearrange code from kfd_topology_add_device */
}

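/* kfd_is_acpi_crat_invalid - CRAT from ACPI is valid only for AMD APU
 *	devices. Ignore the ACPI CRAT on non-APU systems, i.e. when no
 *	node reports both CPU cores and SIMDs.
 */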
static bool kfd_is_acpi_crat_invalid(struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	list_for_each_entry(dev, device_list, list) {
		if (dev->node_props.cpu_cores_count &&
			dev->node_props.simd_count)
			return false;
	}
	pr_info("Ignoring ACPI CRAT on non-APU system\n");
	return true;
}

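/* Initialize the topology with the CPU/APU nodes: parse the ACPI CRAT if
 * one exists and is valid for this system, otherwise build a virtual CRAT
 * describing the CPU, then populate the master device list, expose it
 * through sysfs and fill in non-CRAT information (e.g. system memory from
 * DMI) for CPU-only nodes. dGPU nodes are added later by
 * kfd_topology_add_device().
 */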
int kfd_topology_init(void)
{
	void *crat_image = NULL;
	size_t image_size = 0;
	int ret;
	struct list_head temp_topology_device_list;
	int cpu_only_node = 0;
	struct kfd_topology_device *kdev;
	int proximity_domain;

	/* topology_device_list - Master list of all topology devices
	 * temp_topology_device_list - temporary list created while parsing
	 *	CRAT or VCRAT. Once parsing is complete the contents of the
	 *	list are moved to topology_device_list
	 */

	/* Initialize the heads of both lists */
	INIT_LIST_HEAD(&topology_device_list);
	INIT_LIST_HEAD(&temp_topology_device_list);
	init_rwsem(&topology_lock);

	memset(&sys_props, 0, sizeof(sys_props));

	/* Proximity domains in ACPI CRAT tables start counting at 0. The
	 * same should be true for virtual CRAT tables created at this
	 * stage. GPUs added later in kfd_topology_add_device use a counter.
	 */
	proximity_domain = 0;

	/*
	 * Get the CRAT image from the ACPI. If ACPI doesn't have one or if
	 * it is invalid, create a virtual CRAT.
	 * NOTE: The current implementation expects all AMD APUs to have a
	 *	CRAT. If no CRAT is available, it is assumed to be a CPU.
	 */
	ret = kfd_create_crat_image_acpi(&crat_image, &image_size);
	if (!ret) {
		ret = kfd_parse_crat_table(crat_image,
					   &temp_topology_device_list,
					   proximity_domain);
		if (ret ||
		    kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
			kfd_release_topology_device_list(
				&temp_topology_device_list);
			kfd_destroy_crat_image(crat_image);
			crat_image = NULL;
		}
	}

	if (!crat_image) {
		ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
						    COMPUTE_UNIT_CPU, NULL,
						    proximity_domain);
		cpu_only_node = 1;
		if (ret) {
			pr_err("Error creating VCRAT table for CPU\n");
			return ret;
		}

		ret = kfd_parse_crat_table(crat_image,
					   &temp_topology_device_list,
					   proximity_domain);
		if (ret) {
			pr_err("Error parsing VCRAT table for CPU\n");
			goto err;
		}
	}

	kdev = list_first_entry(&temp_topology_device_list,
				struct kfd_topology_device, list);
	kfd_add_perf_to_topology(kdev);

	down_write(&topology_lock);
	kfd_topology_update_device_list(&temp_topology_device_list,
					&topology_device_list);
	atomic_set(&topology_crat_proximity_domain, sys_props.num_devices-1);
	ret = kfd_topology_update_sysfs();
	up_write(&topology_lock);

	if (!ret) {
		sys_props.generation_count++;
		kfd_update_system_properties();
		kfd_debug_print_topology();
		pr_info("Finished initializing topology\n");
	} else
		pr_err("Failed to update topology in sysfs ret=%d\n", ret);

	/* For nodes with GPU, this information gets added
	 * when the GPU is detected (kfd_topology_add_device).
	 */
	if (cpu_only_node) {
		/* Add additional information to CPU only node created above */
		down_write(&topology_lock);
		kdev = list_first_entry(&topology_device_list,
				struct kfd_topology_device, list);
		up_write(&topology_lock);
		kfd_add_non_crat_information(kdev);
	}

err:
	kfd_destroy_crat_image(crat_image);
	return ret;
}

void kfd_topology_shutdown(void)
{
	down_write(&topology_lock);
	kfd_topology_release_sysfs();
	kfd_release_live_view();
	up_write(&topology_lock);
}

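/* Generate a GPU ID by hashing the device's PCI identity (devfn,
 * subsystem IDs, device ID, bus number) together with the size of its
 * local memory. The result identifies the GPU in the topology and
 * towards user space.
 */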
static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
{
	uint32_t hashout;
	uint32_t buf[7];
	uint64_t local_mem_size;
	int i;
	struct kfd_local_mem_info local_mem_info;

	if (!gpu)
		return 0;

	amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info);

	local_mem_size = local_mem_info.local_mem_size_private +
			local_mem_info.local_mem_size_public;

	buf[0] = gpu->pdev->devfn;
	buf[1] = gpu->pdev->subsystem_vendor;
	buf[2] = gpu->pdev->subsystem_device;
	buf[3] = gpu->pdev->device;
	buf[4] = gpu->pdev->bus->number;
	buf[5] = lower_32_bits(local_mem_size);
	buf[6] = upper_32_bits(local_mem_size);

	for (i = 0, hashout = 0; i < 7; i++)
		hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);

	return hashout;
}

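/* kfd_assign_gpu - Attach @gpu to the first topology device that reports
 *	SIMDs but has no GPU attached yet. Discrete GPUs are never assigned
 *	to CPU/APU nodes. Returns NULL if no suitable device exists, in
 *	which case the caller must create a new topology device for the GPU.
 */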
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
	struct kfd_topology_device *dev;
	struct kfd_topology_device *out_dev = NULL;

	down_write(&topology_lock);
	list_for_each_entry(dev, &topology_device_list, list) {
		/* Discrete GPUs need their own topology device list
		 * entries. Don't assign them to CPU/APU nodes.
		 */
		if (!gpu->device_info->needs_iommu_device &&
		    dev->node_props.cpu_cores_count)
			continue;

		if (!dev->gpu && (dev->node_props.simd_count > 0)) {
			dev->gpu = gpu;
			out_dev = dev;
			break;
		}
	}
	up_write(&topology_lock);
	return out_dev;
}

static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
{
	/*
	 * TODO: Generate an event for thunk about the arrival/removal
	 * of the GPU
	 */
}

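/* Fill in the maximum memory clock for every memory bank of a GPU node.
 * mem_clk_max is not part of the (V)CRAT, so it is queried from amdgpu
 * via amdgpu_amdkfd_get_local_mem_info() after the node is parsed.
 */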
static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *mem;
	struct kfd_local_mem_info local_mem_info;

	if (!dev)
		return;

	amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info);

	list_for_each_entry(mem, &dev->mem_props, list)
		mem->mem_clk_max = local_mem_info.mem_clk_max;
}

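/* Fill in IO link properties that are not available from the (V)CRAT:
 * read PCI Express atomic-op completer support from the device's DEVCAP2
 * register and mark both directions of each CPU<->GPU link with the
 * CRAT_IOLINK_FLAGS_NO_ATOMICS_* flags when 32/64-bit atomics are not
 * usable (e.g. Hawaii, or when PCIe atomics were not successfully
 * requested).
 */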
static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
{
	struct kfd_iolink_properties *link, *cpu_link;
	struct kfd_topology_device *cpu_dev;
	uint32_t cap;
	uint32_t cpu_flag = CRAT_IOLINK_FLAGS_ENABLED;
	uint32_t flag = CRAT_IOLINK_FLAGS_ENABLED;

	if (!dev || !dev->gpu)
		return;

	pcie_capability_read_dword(dev->gpu->pdev,
			PCI_EXP_DEVCAP2, &cap);

	if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
		     PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
		cpu_flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
			CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;

	if (!dev->gpu->pci_atomic_requested ||
	    dev->gpu->device_info->asic_family == CHIP_HAWAII)
		flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
			CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;

	/* GPU only creates direct links so apply flags setting to all */
	list_for_each_entry(link, &dev->io_link_props, list) {
		link->flags = flag;
		cpu_dev = kfd_topology_device_by_proximity_domain(
				link->node_to);
		if (cpu_dev) {
			list_for_each_entry(cpu_link,
					    &cpu_dev->io_link_props, list)
				if (cpu_link->node_to == link->node_from)
					cpu_link->flags = cpu_flag;
		}
	}
}

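/* Add a GPU to the topology. If a matching topology device already exists
 * (e.g. an APU node parsed from the ACPI CRAT), the GPU is attached to it;
 * otherwise a virtual CRAT is generated and parsed to create a new node,
 * which is then published through sysfs. Afterwards the node properties
 * that are not covered by the (V)CRAT - CU info, PCI IDs, clocks, doorbell
 * type, RAS capabilities, etc. - are filled in. Returns 0 on success.
 */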
int kfd_topology_add_device(struct kfd_dev *gpu)
{
	uint32_t gpu_id;
	struct kfd_topology_device *dev;
	struct kfd_cu_info cu_info;
	int res = 0;
	struct list_head temp_topology_device_list;
	void *crat_image = NULL;
	size_t image_size = 0;
	int proximity_domain;
	struct amdgpu_ras *ctx;

	INIT_LIST_HEAD(&temp_topology_device_list);

	gpu_id = kfd_generate_gpu_id(gpu);

	pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);

	proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);

	/* Check to see if this gpu device exists in the topology_device_list.
	 * If so, assign the gpu to that device, else create a Virtual CRAT
	 * for this gpu device and then parse that CRAT to create a new
	 * topology device. Once created, assign the gpu to that topology
	 * device.
	 */
	dev = kfd_assign_gpu(gpu);
	if (!dev) {
		res = kfd_create_crat_image_virtual(&crat_image, &image_size,
						    COMPUTE_UNIT_GPU, gpu,
						    proximity_domain);
		if (res) {
			pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
			       gpu_id);
			return res;
		}
		res = kfd_parse_crat_table(crat_image,
					   &temp_topology_device_list,
					   proximity_domain);
		if (res) {
			pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
			       gpu_id);
			goto err;
		}

		down_write(&topology_lock);
		kfd_topology_update_device_list(&temp_topology_device_list,
			&topology_device_list);

		/* Update the SYSFS tree, since we added another topology
		 * device
		 */
		res = kfd_topology_update_sysfs();
		up_write(&topology_lock);

		if (!res)
			sys_props.generation_count++;
		else
			pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
						gpu_id, res);
		dev = kfd_assign_gpu(gpu);
		if (WARN_ON(!dev)) {
			res = -ENODEV;
			goto err;
		}
	}

	dev->gpu_id = gpu_id;
	gpu->id = gpu_id;

	/* TODO: Move the following lines to function
	 *	kfd_add_non_crat_information
	 */

	/* Fill-in additional information that is not available in CRAT but
	 * needed for the topology
	 */
	amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
	dev->node_props.simd_arrays_per_engine =
		cu_info.num_shader_arrays_per_engine;

	dev->node_props.vendor_id = gpu->pdev->vendor;
	dev->node_props.device_id = gpu->pdev->device;
	dev->node_props.location_id = pci_dev_id(gpu->pdev);
	dev->node_props.max_engine_clk_fcompute =
		amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
	dev->node_props.max_engine_clk_ccompute =
		cpufreq_quick_get_max(0) / 1000;
	dev->node_props.drm_render_minor =
		gpu->shared_resources.drm_render_minor;

	dev->node_props.hive_id = gpu->hive_id;

	kfd_fill_mem_clk_max_info(dev);
	kfd_fill_iolink_non_crat_info(dev);

	switch (dev->gpu->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_TONGA:
		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_PRE_1_0 <<
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
		break;
	case CHIP_CARRIZO:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		pr_debug("Adding doorbell packet type capability\n");
		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->gpu->device_info->asic_family);
	}

	/* Fix errors in CZ CRAT.
	 * simd_count: Carrizo CRAT reports wrong simd_count, probably
	 *	because it doesn't consider masked out CUs
	 * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
	 * capability flag: Carrizo CRAT doesn't report IOMMU flags
	 */
	if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
		dev->node_props.simd_count =
			cu_info.simd_per_cu * cu_info.cu_active_number;
		dev->node_props.max_waves_per_simd = 10;
		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
	}

	ctx = amdgpu_ras_get_context((struct amdgpu_device *)(dev->gpu->kgd));
	if (ctx) {
		/* kfd only concerns SRAM ECC on GFX/SDMA and HBM ECC on UMC */
		dev->node_props.capability |=
			(((ctx->features & BIT(AMDGPU_RAS_BLOCK__SDMA)) != 0) ||
			 ((ctx->features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0)) ?
			HSA_CAP_SRAM_EDCSUPPORTED : 0;
		dev->node_props.capability |= ((ctx->features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
			HSA_CAP_MEM_EDCSUPPORTED : 0;

		dev->node_props.capability |= (ctx->features != 0) ?
			HSA_CAP_RASEVENTNOTIFY : 0;
	}

	kfd_debug_print_topology();

	if (!res)
		kfd_notify_gpu_change(gpu_id, 1);
err:
	kfd_destroy_crat_image(crat_image);
	return res;
}

int kfd_topology_remove_device(struct kfd_dev *gpu)
{
	struct kfd_topology_device *dev, *tmp;
	uint32_t gpu_id;
	int res = -ENODEV;

	down_write(&topology_lock);

	list_for_each_entry_safe(dev, tmp, &topology_device_list, list)
		if (dev->gpu == gpu) {
			gpu_id = dev->gpu_id;
			kfd_remove_sysfs_node_entry(dev);
			kfd_release_topology_device(dev);
			sys_props.num_devices--;
			res = 0;
			if (kfd_topology_update_sysfs() < 0)
				kfd_topology_release_sysfs();
			break;
		}

	up_write(&topology_lock);

	if (!res)
		kfd_notify_gpu_change(gpu_id, 0);

	return res;
}

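/* kfd_topology_enum_kfd_devices - Enumerate through all devices in the
 *	KFD topology. The kfd_dev pointer of the device at index @idx is
 *	returned through @kdev (NULL for CPU-only nodes). Returns 0 if the
 *	device at @idx exists, -1 when @idx is past the end of the list.
 */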
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
{
	struct kfd_topology_device *top_dev;
	uint8_t device_idx = 0;

	*kdev = NULL;
	down_read(&topology_lock);

	list_for_each_entry(top_dev, &topology_device_list, list) {
		if (device_idx == idx) {
			*kdev = top_dev->gpu;
			up_read(&topology_lock);
			return 0;
		}

		device_idx++;
	}

	up_read(&topology_lock);

	return -1;
}

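/* Return the APIC ID of the first logical processor in @cpumask, or -1
 * if the mask is empty or invalid. On non-x86_64 builds the CPU number
 * itself is returned instead of an APIC ID.
 */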
static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
{
	int first_cpu_of_numa_node;

	if (!cpumask || cpumask == cpu_none_mask)
		return -1;
	first_cpu_of_numa_node = cpumask_first(cpumask);
	if (first_cpu_of_numa_node >= nr_cpu_ids)
		return -1;
#ifdef CONFIG_X86_64
	return cpu_data(first_cpu_of_numa_node).apicid;
#else
	return first_cpu_of_numa_node;
#endif
}

/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical
 *	processor of the NUMA node, or -1 on failure. Falls back to the
 *	online CPU mask when the NUMA node ID is invalid.
 */
int kfd_numa_node_to_apic_id(int numa_node_id)
{
	if (numa_node_id == -1) {
		pr_warn("Invalid NUMA Node. Use online CPU mask\n");
		return kfd_cpumask_to_apic_id(cpu_online_mask);
	}
	return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id));
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
{
	struct kfd_topology_device *dev;
	unsigned int i = 0;
	int r = 0;

	down_read(&topology_lock);

	list_for_each_entry(dev, &topology_device_list, list) {
		if (!dev->gpu) {
			i++;
			continue;
		}

		seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
		r = dqm_debugfs_hqds(m, dev->gpu->dqm);
		if (r)
			break;
	}

	up_read(&topology_lock);

	return r;
}

int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
{
	struct kfd_topology_device *dev;
	unsigned int i = 0;
	int r = 0;

	down_read(&topology_lock);

	list_for_each_entry(dev, &topology_device_list, list) {
		if (!dev->gpu) {
			i++;
			continue;
		}

		seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
		r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets);
		if (r)
			break;
	}

	up_read(&topology_lock);

	return r;
}

#endif