/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

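/* Advance the runlist IB write pointer. @wptr is kept in dwords while
 * @increment_bytes and @buffer_size_bytes are in bytes; warn if the new
 * pointer would run past the end of the runlist IB.
 */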
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
			    unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

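/* Compute the runlist IB size, in bytes, needed to map every active
 * process and queue, and report whether the runlist is over-subscribed
 * (more processes or compute queues than the scheduler can hold at once).
 */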
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->active_queue_count;
	compute_queue_count = pm->dqm->active_cp_queue_count;

	/* Check if there is over subscription: more processes than the
	 * scheduler rotates through in one quantum, or more compute
	 * queues than the CP hardware can map at once.
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_cp_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

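/* Allocate and zero a GTT sub-allocation for the runlist IB, returning
 * its CPU and GPU addresses through @rl_buffer and @rl_gpu_buffer.
 */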
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

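/* Build the runlist IB: one map-process packet per process, followed by
 * map-queues packets for each of its active kernel and user queues. An
 * over-subscribed runlist is terminated with a chained runlist packet
 * pointing back at the IB itself, so the scheduler keeps cycling it.
 */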
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->active_queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;

		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}
202
	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription) {
		if (!pm->is_over_subscription)
			pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);
	}
	pm->is_over_subscription = is_over_subscription;

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

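/* Select the ASIC-specific packet-manager functions and create the HIQ
 * kernel queue used to submit packets to the hardware scheduler (HWS).
 */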
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_ARCTURUS:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

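/* Tear down the packet manager. @hanging indicates the HWS may be
 * unresponsive and is passed through to kernel_queue_uninit().
 */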
void pm_uninit(struct packet_manager *pm, bool hanging)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue, hanging);
}

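/* Submit a set-resources packet on the HIQ, describing the scheduling
 * resources (VMIDs, doorbells, etc.) the HWS is allowed to use.
 */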
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

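/* Build a runlist IB from @dqm_queues and submit a runlist packet on the
 * HIQ that points the HWS at it. On failure the IB is released again.
 */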
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = kq_acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	kq_submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

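/* Submit a query-status packet; the HWS writes @fence_value to
 * @fence_address once preceding work has been processed, which lets the
 * caller poll for completion.
 */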
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

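/* Submit an unmap-queues packet asking the HWS to preempt (or, when
 * @reset is set, reset) the queues selected by @filter and @filter_param.
 */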
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
				       reset, sdma_engine);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

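/* Free the runlist IB, if one is currently allocated. */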
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

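/* Hex-dump the current runlist IB to debugfs. */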
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, " No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

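/* Deliberately hang the HWS by submitting a 0x55-filled packet on the
 * HIQ; a debugfs hook for exercising GPU-reset handling.
 */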
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	kq_submit_packet(pm->priv_queue);

	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif