#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
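
/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */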
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring);
static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
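
/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */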
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}
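
/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * Pad the ring with @count NOP packets using the ring's nop value.
 */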
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}
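
/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * Pad @ib with NOPs until its length is a multiple of the ring's alignment.
 */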
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
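
/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */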
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);

	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
		amdgpu_ring_lru_touch(ring->adev, ring);
}
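
/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */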
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}
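
/**
 * amdgpu_ring_priority_put - restore a ring's priority
 *
 * @ring: amdgpu_ring structure holding the information
 * @priority: target priority
 *
 * Release a request for executing at @priority
 */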
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
			      enum drm_sched_priority priority)
{
	int i;

	if (!ring->funcs->set_priority)
		return;

	if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
		return;

	/* no need to restore if the job is already at the lowest priority */
	if (priority == DRM_SCHED_PRIORITY_NORMAL)
		return;

	mutex_lock(&ring->priority_mutex);
	/* something higher prio is executing, no need to decay */
	if (ring->priority > priority)
		goto out_unlock;

	/* decay priority to the next level with a job available */
	for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		if (i == DRM_SCHED_PRIORITY_NORMAL
				|| atomic_read(&ring->num_jobs[i])) {
			ring->priority = i;
			ring->funcs->set_priority(ring, i);
			break;
		}
	}

out_unlock:
	mutex_unlock(&ring->priority_mutex);
}
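
/**
 * amdgpu_ring_priority_get - change the ring's priority
 *
 * @ring: amdgpu_ring structure holding the information
 * @priority: target priority
 *
 * Request a ring's priority to be raised to @priority (refcounted).
 */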
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
			      enum drm_sched_priority priority)
{
	if (!ring->funcs->set_priority)
		return;

	atomic_inc(&ring->num_jobs[priority]);

	mutex_lock(&ring->priority_mutex);
	if (priority <= ring->priority)
		goto out_unlock;

	ring->priority = priority;
	ring->funcs->set_priority(ring, priority);

out_unlock:
	mutex_unlock(&ring->priority_mutex);
}
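
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */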
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned irq_type)
{
	int r, i;
	int sched_hw_submission = amdgpu_sched_hw_submission;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu yet - KIQ fences get
	 * submitted to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
		if (r)
			return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}
	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
	mutex_init(&ring->priority_mutex);
	INIT_LIST_HEAD(&ring->lru_list);
	amdgpu_ring_lru_touch(adev, ring);

	for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
		atomic_set(&ring->num_jobs[i], 0);

	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}

	return 0;
}
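
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */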
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	ring->ready = false;

	/* Not to finish a ring which is not initialized */
	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
		return;

	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_device_wb_free(ring->adev, ring->fence_offs);

	amdgpu_bo_free_kernel(&ring->ring_obj,
			      &ring->gpu_addr,
			      (void **)&ring->ring);

	amdgpu_debugfs_ring_fini(ring);

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;

	ring->adev->rings[ring->idx] = NULL;
}

static void amdgpu_ring_lru_touch_locked(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring)
{
	/* list_move_tail handles the case where ring isn't part of the list */
	list_move_tail(&ring->lru_list, &adev->ring_lru_list);
}

static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
				       int *blacklist, int num_blacklist)
{
	int i;

	for (i = 0; i < num_blacklist; i++) {
		if (ring->idx == blacklist[i])
			return true;
	}

	return false;
}
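
/**
 * amdgpu_ring_lru_get - get the least recently used ring for a HW IP block
 *
 * @adev: amdgpu_device pointer
 * @type: amdgpu_ring_type enum
 * @blacklist: blacklisted ring ids array
 * @num_blacklist: number of entries in @blacklist
 * @lru_pipe_order: find a ring from the least recently used pipe
 * @ring: output ring
 *
 * Retrieve the amdgpu_ring structure for the least recently used ring of
 * a specific IP block (all asics).
 * Returns 0 on success, error on failure.
 */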
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
			int *blacklist, int num_blacklist,
			bool lru_pipe_order, struct amdgpu_ring **ring)
{
	struct amdgpu_ring *entry;

	/* List is sorted in LRU order, find first entry corresponding
	 * to the desired HW IP */
	*ring = NULL;
	spin_lock(&adev->ring_lru_list_lock);
	list_for_each_entry(entry, &adev->ring_lru_list, lru_list) {
		if (entry->funcs->type != type)
			continue;

		if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
			continue;

		if (!*ring) {
			*ring = entry;

			/* We are done for ring LRU if no lru_pipe_order */
			if (!lru_pipe_order)
				break;
		}

		/* Move all rings on the same pipe to the end of the list */
		if (entry->pipe == (*ring)->pipe)
			amdgpu_ring_lru_touch_locked(adev, entry);
	}

	/* Move the ring we found to the end of the list */
	if (*ring)
		amdgpu_ring_lru_touch_locked(adev, *ring);

	spin_unlock(&adev->ring_lru_list_lock);

	if (!*ring) {
		DRM_ERROR("Ring LRU contains no entries for ring type:%d\n", type);
		return -EINVAL;
	}

	return 0;
}
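
/**
 * amdgpu_ring_lru_touch - mark a ring as recently being used
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to touch
 *
 * Move @ring to the tail of the lru list
 */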
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	spin_lock(&adev->ring_lru_list_lock);
	amdgpu_ring_lru_touch_locked(adev, ring);
	spin_unlock(&adev->ring_lru_list_lock);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12)/4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);

	ent = debugfs_create_file(name,
				  S_IFREG | S_IRUGO, root,
				  ring, &amdgpu_debugfs_ring_fops);
	if (!ent)
		return -ENOMEM;

	i_size_write(ent->d_inode, ring->ring_size + 12);
	ring->ent = ent;
#endif
	return 0;
}

static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove(ring->ent);
#endif
}