#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT		msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)
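
/*
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * command streams are stored.  The driver puts a pointer to the IB
 * into the ring buffer and the CP fetches and executes the commands
 * from the IB when it is directed to do so.
 */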
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
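
/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: the VM the IB belongs to, or NULL
 * @size: requested IB size in bytes
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated from the suballocator.
 * If a VM is given, the GPU virtual address is assigned later when the
 * IB is mapped into the VM.
 * Returns 0 on success, error on failure.
 */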
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                  unsigned size, struct amdgpu_ib *ib)
{
        int r;

        if (size) {
                r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
                                     &ib->sa_bo, size, 256);
                if (r) {
                        dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
                        return r;
                }

                ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

                /* a VM-mapped IB gets its GPU address from the VM later */
                if (!vm)
                        ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
        }

        return 0;
}
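
/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence that must signal before the IB memory can be reused, or NULL
 *
 * Free an IB (all asics); the backing memory is returned to the
 * suballocator once @f signals.
 */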
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                    struct dma_fence *f)
{
        amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
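
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are submitted to
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for a direct (ring test) submission
 * @f: fence created for this submission
 *
 * Schedule an IB on the associated ring (all asics): emit the pipeline
 * sync and VM flush if needed, then the IBs themselves and finally the
 * fence that signals completion.
 * Returns 0 on success, error on failure.
 */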
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                       struct amdgpu_ib *ibs, struct amdgpu_job *job,
                       struct dma_fence **f)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib *ib = &ibs[0];
        struct dma_fence *tmp = NULL;
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
        uint64_t fence_ctx;
        uint32_t status = 0, alloc_size;
        unsigned fence_flags = 0;
        unsigned i;
        int r = 0;
        bool need_pipe_sync = false;

        if (num_ibs == 0)
                return -EINVAL;
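
        /* ring tests don't use a job */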
        if (job) {
                vm = job->vm;
                fence_ctx = job->base.s_fence->scheduled.context;
        } else {
                vm = NULL;
                fence_ctx = 0;
        }

        if (!ring->sched.ready) {
                dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
                return -EINVAL;
        }

        if (vm && !job->vmid) {
                dev_err(adev->dev, "VM IB without ID\n");
                return -EINVAL;
        }

        alloc_size = ring->funcs->emit_frame_size + num_ibs *
                     ring->funcs->emit_ib_size;

        r = amdgpu_ring_alloc(ring, alloc_size);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }

        /* sync to previously scheduled work before running the IBs if
         * the job depends on it, the VM update requires it, or the
         * context switches under SRIOV
         */
        need_ctx_switch = ring->current_ctx != fence_ctx;
        if (ring->funcs->emit_pipeline_sync && job &&
            ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
             (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
             amdgpu_vm_need_pipeline_sync(ring, job))) {
                need_pipe_sync = true;

                if (tmp)
                        trace_amdgpu_ib_pipe_sync(job, tmp);

                dma_fence_put(tmp);
        }

        if (ring->funcs->insert_start)
                ring->funcs->insert_start(ring);

        if (job) {
                r = amdgpu_vm_flush(ring, job, need_pipe_sync);
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
                }
        }

        if (job && ring->funcs->init_cond_exec)
                patch_offset = amdgpu_ring_init_cond_exec(ring);

        /* the HDP flush can be skipped on x86-64 APUs */
#ifdef CONFIG_X86_64
        if (!(adev->flags & AMD_IS_APU))
#endif
        {
                if (ring->funcs->emit_hdp_flush)
                        amdgpu_ring_emit_hdp_flush(ring);
                else
                        amdgpu_asic_flush_hdp(adev, ring);
        }

        if (need_ctx_switch)
                status |= AMDGPU_HAVE_CTX_SWITCH;

        skip_preamble = ring->current_ctx == fence_ctx;
        if (job && ring->funcs->emit_cntxcntl) {
                status |= job->preamble_status;
                status |= job->preemption_status;
                amdgpu_ring_emit_cntxcntl(ring, status);
        }

        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
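
                /* drop preamble IBs if we don't have a context switch */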
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
                    skip_preamble &&
                    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
                    !amdgpu_mcbp &&
                    !amdgpu_sriov_vf(adev))
                        continue;

                amdgpu_ring_emit_ib(ring, job, ib, status);
                status &= ~AMDGPU_HAVE_CTX_SWITCH;
        }

        if (ring->funcs->emit_tmz)
                amdgpu_ring_emit_tmz(ring, false);

#ifdef CONFIG_X86_64
        if (!(adev->flags & AMD_IS_APU))
#endif
                amdgpu_asic_invalidate_hdp(adev, ring);

        if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
                fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
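
        /* wrap the last IB with an user fence if one was requested */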
        if (job && job->uf_addr) {
                amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
                                       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
        }

        r = amdgpu_fence_emit(ring, f, fence_flags);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vmid)
                        amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
                amdgpu_ring_undo(ring);
                return r;
        }

        if (ring->funcs->insert_end)
                ring->funcs->insert_end(ring);

        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);

        ring->current_ctx = fence_ctx;
        if (vm && ring->funcs->emit_switch_buffer)
                amdgpu_ring_emit_switch_buffer(ring);
        amdgpu_ring_commit(ring);
        return 0;
}
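
/**
 * amdgpu_ib_pool_init - init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator that manages the pool of memory
 * used for IBs (all asics).
 * Returns 0 on success, error on failure.
 */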
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->ib_pool_ready)
                return 0;

        r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
                                      AMDGPU_IB_POOL_SIZE * 64 * 1024,
                                      AMDGPU_GPU_PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_GTT);
        if (r)
                return r;

        adev->ib_pool_ready = true;
        if (amdgpu_debugfs_sa_init(adev))
                dev_err(adev->dev, "failed to register debugfs file for SA\n");

        return 0;
}
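
/**
 * amdgpu_ib_pool_fini - free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator that manages the pool of memory
 * used for IBs (all asics).
 */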
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
        if (adev->ib_pool_ready) {
                amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
                adev->ib_pool_ready = false;
        }
}
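
/**
 * amdgpu_ib_ring_tests - test an IB on each ring
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.  If a test fails the ring
 * is disabled; if the test fails on the GFX ring, acceleration is
 * disabled entirely.
 * Returns 0 on success, error on failure.
 */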
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
        unsigned i;
        int r, ret = 0;
        long tmo_gfx, tmo_mm;

        tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
        if (amdgpu_sriov_vf(adev)) {
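                /*
                 * On SRIOV the MM engines are handled on the hypervisor
                 * side and are not scheduled together with the CP and
                 * SDMA engines, so a test IB may wait considerably
                 * longer before it executes; use a larger timeout.
                 */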
                tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
        }

        if (amdgpu_sriov_runtime(adev)) {
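                /*
                 * Under SRIOV runtime the CP and SDMA engines time-share
                 * the GPU with other VFs, so the test may have to wait
                 * for the engines to come back; widen the GFX timeout too.
                 */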
                tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
        } else if (adev->gmc.xgmi.hive_id) {
                tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
        }

        for (i = 0; i < adev->num_rings; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                long tmo;
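
                /*
                 * KIQ rings don't have an IB test because we never
                 * submit IBs to them and they have no interrupt support.
                 */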
                if (!ring->sched.ready || !ring->funcs->test_ib)
                        continue;
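
                /* MM engines need more time */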
                if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
                    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
                        tmo = tmo_mm;
                else
                        tmo = tmo_gfx;

                r = amdgpu_ring_test_ib(ring, tmo);
                if (!r) {
                        DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
                                      ring->name);
                        continue;
                }

                ring->sched.ready = false;
                DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
                              ring->name, r);

                if (ring == &adev->gfx.gfx_ring[0]) {
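                        /* a broken GFX ring disables acceleration entirely */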
                        adev->accel_working = false;
                        return r;

                } else {
                        ret = r;
                }
        }
        return ret;
}
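
/*
 * Debugfs info
 */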
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;

        amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

        return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
        {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
        return 0;
#endif
}