/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
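
/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 *
 * A typical submission, sketched for illustration only:
 *
 *	r = amdgpu_ring_alloc(ring, ndw);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, packet);
 *	amdgpu_ring_commit(ring);
 *
 * A caller that fails after allocating must call amdgpu_ring_undo()
 * instead of amdgpu_ring_commit() to roll back the write pointer.
 */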
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}
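
/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */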
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}
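
/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */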
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
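
/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */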
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}
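
/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */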
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}
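
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */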
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio)
{
	int r, i;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
		if (r)
			return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) ring trail_fence_offs wb alloc failed\n", r);
		return r;
	}
	ring->trail_fence_gpu_addr =
		adev->wb.gpu_addr + (ring->trail_fence_offs * 4);
	ring->trail_fence_cpu_addr = &adev->wb.wb[ring->trail_fence_offs];

	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
		return r;
	}
	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
	mutex_init(&ring->priority_mutex);

	if (!ring->no_scheduler) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
		atomic_set(&ring->num_jobs[i], 0);

	return 0;
}
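
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */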
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	/* Not to finish a ring which is not initialized */
	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
		return;

	ring->sched.ready = false;

	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
	amdgpu_device_wb_free(ring->adev, ring->fence_offs);
	/* free the trailing fence slot allocated in amdgpu_ring_init() */
	amdgpu_device_wb_free(ring->adev, ring->trail_fence_offs);

	amdgpu_bo_free_kernel(&ring->ring_obj,
			      &ring->gpu_addr,
			      (void **)&ring->ring);

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	ring->adev->rings[ring->idx] = NULL;
}
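
/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */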
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
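
/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timedout fence
 *
 * Tries to get a ring going again when it's stuck.
 */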
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	atomic_inc(&ring->adev->gpu_reset_counter);
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
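
/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data.
 * The file can be dumped with a plain read, e.g. with od(1) on
 * /sys/kernel/debug/dri/<n>/amdgpu_ring_<name>.
 */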
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12) / 4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);

	ent = debugfs_create_file(name,
				  S_IFREG | S_IRUGO, root,
				  ring, &amdgpu_debugfs_ring_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	i_size_write(ent->d_inode, ring->ring_size + 12);
	ring->ent = ent;
#endif
	return 0;
}
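
/**
 * amdgpu_ring_test_helper - test a ring and update scheduler readiness
 *
 * @ring: ring to test
 *
 * Run the ring test and mark the ring's scheduler as ready on success
 * or not ready on failure.
 *
 * Returns 0 on success, error on failure.
 */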
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;
	return r;
}