#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)
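
/*
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * command streams are stored.  The driver puts a pointer to the IB in
 * the command ring and the hardware fetches and executes the commands
 * from the IB.  Userspace drivers typically generate command buffers
 * which are handed to the kernel and submitted as IBs on the
 * requested ring.
 */

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: optional VM this IB belongs to
 * @size: requested IB size in bytes, 0 for an empty IB
 * @pool_type: IB pool to suballocate from
 * @ib: IB object returned
 *
 * Request an IB from the suballocator (all asics).
 *
 * Returns 0 on success, a negative error code on failure.
 */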
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, enum amdgpu_ib_pool_type pool_type,
		  struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}
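
/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence the suballocation waits on before the memory is reused,
 *     may be NULL
 *
 * Free an IB (all asics); the backing memory returns to the
 * suballocator once @f has signaled.
 */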
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
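
/*
 * Example usage (an illustrative sketch, not code from this file): a
 * minimal direct submission without a job, similar in spirit to the
 * per-ring IB tests.  Error handling is elided and the packet contents
 * of the IB are hypothetical.
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	long r;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 *	... fill ib.ptr[] with packets and set ib.length_dw ...
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	r = dma_fence_wait_timeout(f, false, AMDGPU_IB_TEST_TIMEOUT);
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */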
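
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are submitted to
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job this submission belongs to, NULL for a direct submission
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics): emit the required
 * pipeline and memory syncs, flush the VM if needed, emit the IBs
 * themselves and finally a fence that signals once the submission has
 * completed.
 *
 * Returns 0 on success, a negative error code on failure.
 */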
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;
	bool secure;

	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;
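
	/* ring tests don't use a job */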
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->scheduled.context : 0;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->sched.ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
	    (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
		dev_err(adev->dev, "secure submissions not supported on compute rings\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
		ring->funcs->emit_mem_sync(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, true);

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	amdgpu_device_flush_hdp(adev, ring);

	if (need_ctx_switch)
		status |= AMDGPU_HAVE_CTX_SWITCH;

	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}
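
	/* Set up the initial TMZ (secure submission) state and send it off */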
	secure = false;
	if (job && ring->funcs->emit_frame_cntl) {
		secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
		amdgpu_ring_emit_frame_cntl(ring, true, secure);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		if (job && ring->funcs->emit_frame_cntl) {
			if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
				amdgpu_ring_emit_frame_cntl(ring, false, secure);
				secure = !secure;
				amdgpu_ring_emit_frame_cntl(ring, true, secure);
			}
		}

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (job && ring->funcs->emit_frame_cntl)
		amdgpu_ring_emit_frame_cntl(ring, false, secure);

	amdgpu_device_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
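
	/* wrap the last IB with fence */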
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, job, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, false);

	amdgpu_ring_commit(ring);
	return 0;
}
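
/**
 * amdgpu_ib_pool_init - initialize the IB (Indirect Buffer) pools
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocators managing the pools of memory used
 * for IBs (all asics).
 *
 * Returns 0 on success, a negative error code on failure.
 */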
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	unsigned size;
	int r, i;

	if (adev->ib_pool_ready)
		return 0;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
		if (i == AMDGPU_IB_POOL_DIRECT)
			size = PAGE_SIZE * 6;
		else
			size = AMDGPU_IB_POOL_SIZE;

		r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
					      size, AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT);
		if (r)
			goto error;
	}
	adev->ib_pool_ready = true;

	return 0;

error:
	while (i--)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	return r;
}
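
/**
 * amdgpu_ib_pool_fini - tear down the IB (Indirect Buffer) pools
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocators managing the pools of memory used
 * for IBs (all asics).
 */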
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->ib_pool_ready)
		return;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	adev->ib_pool_ready = false;
}
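
/**
 * amdgpu_ib_ring_tests - test an IB on each ring
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring and disable any ring whose
 * test fails.  A failure on the primary GFX ring disables acceleration
 * altogether.
 *
 * Returns 0 on success, a negative error code if the primary GFX ring
 * test fails.
 */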
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	long tmo_gfx, tmo_mm;
	int r, ret = 0;
	unsigned i;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
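		/* MM engines on the hypervisor side are not scheduled
		 * together with the CP and SDMA engines, so even in
		 * exclusive mode an MM engine can still be serving another
		 * VF; give it plenty of time to come back to this VF.
		 */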
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
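		/* CP and SDMA engines are scheduled together, so the timeout
		 * needs to be wide enough to cover a world switch back to
		 * this VF under SR-IOV runtime.
		 */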
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	} else if (adev->gmc.xgmi.hive_id) {
		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
	}

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;
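
		/* KIQ rings have no IB test because we never submit IBs to
		 * them and they have no interrupt support.
		 */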
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;
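
		/* MM engines need more time */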
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
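			/* the primary GFX ring failed, nothing will work;
			 * disable acceleration entirely
			 */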
			adev->accel_working = false;
			return r;

		} else {
			ret = r;
		}
	}
	return ret;
}
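
/*
 * Debugfs info
 */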
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;

	seq_printf(m, "--------------------- DELAYED ---------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
				     m);
	seq_printf(m, "-------------------- IMMEDIATE --------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
				     m);
	seq_printf(m, "--------------------- DIRECT ----------------------\n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info);

#endif

void amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_sa_info", 0444, root, adev,
			    &amdgpu_debugfs_sa_info_fops);
#endif
}