/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "vega10/soc15ip.h"
#include "vega10/SDMA0/sdma0_4_0_offset.h"
#include "vega10/SDMA0/sdma0_4_0_sh_mask.h"
#include "vega10/SDMA1/sdma1_4_0_offset.h"
#include "vega10/SDMA1/sdma1_4_0_sh_mask.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "raven1/SDMA0/sdma0_4_1_default.h"

#include "soc15_common.h"
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"

MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin");

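/* Local field masks for mmSDMA0_POWER_CNTL, used by the Raven power-gating
 * setup below; they do not appear to be provided by the imported sh_mask
 * headers, so they are defined here.
 */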
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 golden_settings_sdma_4[] = {
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0x003ff006, 0x0003c000,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), 0xffffffff, 0x3f000100,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_IB_CNTL), 0x800f0100, 0x00000100,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), 0x003ff000, 0x0003c000,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0
};

static const u32 golden_settings_sdma_vg10[] = {
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002
};

static const u32 golden_settings_sdma_4_1[] = {
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0xfc3fffff, 0x40000051,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0111, 0x00000100,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0111, 0x00000100,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0
};

static const u32 golden_settings_sdma_rv1[] = {
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00000002,
	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00000002
};
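
/* Translate an instance-relative SDMA register offset into an absolute
 * SOC15 register offset using the per-instance base from the IP headers.
 */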
static u32 sdma_v4_0_get_reg_offset(u32 instance, u32 internal_offset)
{
	u32 base = 0;

	switch (instance) {
	case 0:
		base = SDMA0_BASE.instance[0].segment[0];
		break;
	case 1:
		base = SDMA1_BASE.instance[0].segment[0];
		break;
	default:
		BUG();
		break;
	}

	return base + internal_offset;
}
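
/* Program the per-ASIC "golden" register settings defined above. */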
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_sdma_4,
						 (const u32)ARRAY_SIZE(golden_settings_sdma_4));
		amdgpu_program_register_sequence(adev,
						 golden_settings_sdma_vg10,
						 (const u32)ARRAY_SIZE(golden_settings_sdma_vg10));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 golden_settings_sdma_4_1,
						 (const u32)ARRAY_SIZE(golden_settings_sdma_4_1));
		amdgpu_program_register_sequence(adev,
						 golden_settings_sdma_rv1,
						 (const u32)ARRAY_SIZE(golden_settings_sdma_rv1));
		break;
	default:
		break;
	}
}

/**
 * sdma_v4_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
		DRM_DEBUG("psp_load == '%s'\n",
			  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v4_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 *rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);

	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
	return ((*rptr) >> 2);
}

/**
 * sdma_v4_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 *wptr = NULL;
	uint64_t local_wptr = 0;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
		*wptr = (*wptr) >> 2;
		DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
	} else {
		u32 lowbit, highbit;
		int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

		wptr = &local_wptr;
		lowbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR)) >> 2;
		highbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;

		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
			  me, highbit, lowbit);
		*wptr = highbit;
		*wptr = (*wptr) << 32;
		*wptr |= lowbit;
	}

	return *wptr;
}

/**
 * sdma_v4_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	DRM_DEBUG("Setting write pointer\n");
	if (ring->use_doorbell) {
		DRM_DEBUG("Using doorbell -- "
			  "wptr_offs == 0x%08x "
			  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
			  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
			  ring->wptr_offs,
			  lower_32_bits(ring->wptr << 2),
			  upper_32_bits(ring->wptr << 2));

		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
		adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
			  ring->doorbell_index, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		DRM_DEBUG("Not using doorbell -- "
			  "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
			  "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
			  me,
			  lower_32_bits(ring->wptr << 2),
			  me,
			  upper_32_bits(ring->wptr << 2));
		WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
		WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
	}
}
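
/* Pad the ring with NOPs.  When the SDMA firmware supports burst NOP
 * (feature_version >= 20, see sdma_v4_0_init_microcode()), a single NOP
 * header can declare count - 1 trailing padding dwords, so only one
 * packet header is consumed.
 */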
static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VEGA10).
 */
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib,
				   unsigned vm_id, bool ctx_switch)
{
	u32 vmid = vm_id & 0xf;

	/* IB packet must end on a 8 DW boundary */
	sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

/**
 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;
	struct nbio_hdp_flush_reg *nbio_hf_reg;

	if (ring->adev->flags & AMD_IS_APU)
		nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
	else
		nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
	else
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_done_offset << 2);
	amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_req_offset << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

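/* Writing 1 to mmHDP_DEBUG0 through an SRBM write packet is used here to
 * invalidate the HDP cache.
 */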
static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0));
	amdgpu_ring_write(ring, 1);
}

/**
 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence is written to
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and to generate an interrupt
 * if needed (VEGA10).
 */
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	/* zero in first two bits */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		/* zero in first two bits */
		BUG_ON(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v4_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
		ib_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
	}

	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * sdma_v4_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VEGA10).
 */
static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VEGA10).
 */
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl);
	}
}

/**
 * sdma_v4_0_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VEGA10).
 */
static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v4_0_gfx_stop(adev);
		sdma_v4_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), f32_cntl);
	}
}

/**
 * sdma_v4_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	u32 doorbell_offset;
	u32 temp;
	int i, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR), 0);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_HI), 0);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR), 0);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_HI), 0);

		/* set the wb address whether it's enabled or not */
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);

		ring->wptr = 0;

		/* before programing wptr to a less value, need set minor_ptr_update first */
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);

		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
			WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
			WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
		}

		doorbell = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL));
		doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET));

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
							OFFSET, ring->doorbell_index);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL), doorbell);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
		if (adev->flags & AMD_IS_APU)
			nbio_v7_0_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
		else
			nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);

		if (amdgpu_sriov_vf(adev))
			sdma_v4_0_ring_set_wptr(ring);

		/* set minor_ptr_update to 0 after wptr programed */
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);

		/* set utc l1 enable flag always to 1 */
		temp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), temp);

		if (!amdgpu_sriov_vf(adev)) {
			/* unhalt engine */
			temp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
			WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp);
		}

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		ib_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL), ib_cntl);

		ring->ready = true;

		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need the lines below */
			sdma_v4_0_ctx_switch_enable(adev, true);
			sdma_v4_0_enable(adev, true);
		}

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}
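
/* Toggle the SDMA0 context-empty ("idle") interrupt; on Raven this
 * interrupt drives SDMA power gating when AMD_PG_SUPPORT_SDMA is set.
 */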
static void
sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
		/* enable idle interrupt */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
		data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;

		if (data != def)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
	} else {
		/* disable idle interrupt */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
		data &= ~SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
		if (data != def)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
	}
}
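
/* One-time power-gating setup for Raven: enable HW-based PG, enable the
 * idle interrupt, and program the on/off condition hold and status
 * duration times from the register defaults.
 */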
static void sdma_v4_1_init_power_gating(struct amdgpu_device *adev)
{
	uint32_t def, data;

	/* Enable HW based PG. */
	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	data |= SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK;
	if (data != def)
		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

	/* enable interrupt */
	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
	data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
	if (data != def)
		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);

	/* Configure hold time and status duration from the defaults. */
	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	data &= ~SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK;
	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK);
	data &= ~SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK;
	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK);
	if (data != def)
		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
}

static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
{
	if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
		return;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		sdma_v4_1_init_power_gating(adev);
		sdma_v4_1_update_power_gating(adev, true);
		break;
	default:
		break;
	}
}

/**
 * sdma_v4_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
{
	sdma_v4_0_init_pg(adev);

	return 0;
}

/**
 * sdma_v4_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	u32 digest_size = 0;
	int i, j;

	/* halt the MEs */
	sdma_v4_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		uint16_t version_major;
		uint16_t version_minor;
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;

		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

		version_major = le16_to_cpu(hdr->header.header_version_major);
		version_minor = le16_to_cpu(hdr->header.header_version_minor);

		if (version_major == 1 && version_minor >= 1) {
			const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = (const struct sdma_firmware_header_v1_1 *) hdr;
			digest_size = le32_to_cpu(sdma_v1_1_hdr->digest_size);
		}

		/* the digest trails the ucode payload and is not written to the engine */
		fw_size -= digest_size;

		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0);

		for (j = 0; j < fw_size; j++)
			WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));

		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * sdma_v4_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_start(struct amdgpu_device *adev)
{
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		/* disable RB and halt engine */
		sdma_v4_0_ctx_switch_enable(adev, false);
		sdma_v4_0_enable(adev, false);

		/* set RB registers */
		r = sdma_v4_0_gfx_resume(adev);
		return r;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		r = sdma_v4_0_load_microcode(adev);
		if (r)
			return r;
	}

	/* unhalt the MEs */
	sdma_v4_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v4_0_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v4_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v4_0_rlc_resume(adev);

	return r;
}

/**
 * sdma_v4_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value back
 * to memory (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * sdma_v4_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VEGA10).
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v4_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}

/**
 * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 */
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VEGA10).
 */
static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xfffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VEGA10).
 */
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
	unsigned eng = ring->vm_inv_eng;

	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
	pd_addr |= AMDGPU_PTE_VALID;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
	amdgpu_ring_write(ring, lower_32_bits(pd_addr));

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
	amdgpu_ring_write(ring, upper_32_bits(pd_addr));

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng);
	amdgpu_ring_write(ring, req);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
	amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 1 << vm_id); /* reference */
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
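
/* Raven carries a single SDMA instance; Vega10 carries two. */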
static int sdma_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_RAVEN)
		adev->sdma.num_instances = 1;
	else
		adev->sdma.num_instances = 2;

	sdma_v4_0_set_ring_funcs(adev);
	sdma_v4_0_set_buffer_funcs(adev);
	sdma_v4_0_set_vm_pte_funcs(adev);
	sdma_v4_0_set_irq_funcs(adev);

	return 0;
}

static int sdma_v4_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA0, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_SDMA1, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	r = sdma_v4_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;

		DRM_INFO("use_doorbell being set to: [%s]\n",
			 ring->use_doorbell ? "true" : "false");

		ring->doorbell_index = (i == 0) ?
			(AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) /* get the bit for SDMA0 */
			: (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); /* get the bit for SDMA1 */

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v4_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int sdma_v4_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v4_0_init_golden_registers(adev);

	r = sdma_v4_0_start(adev);

	return r;
}

static int sdma_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	sdma_v4_0_ctx_switch_enable(adev, false);
	sdma_v4_0_enable(adev, false);

	return 0;
}

static int sdma_v4_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v4_0_hw_fini(adev);
}

static int sdma_v4_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v4_0_hw_init(adev);
}

static bool sdma_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		u32 tmp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_STATUS_REG));

		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			return false;
	}

	return true;
}

static int sdma_v4_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 sdma0, sdma1;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		sdma0 = RREG32(sdma_v4_0_get_reg_offset(0, mmSDMA0_STATUS_REG));
		sdma1 = RREG32(sdma_v4_0_get_reg_offset(1, mmSDMA0_STATUS_REG));

		/* both engines must report the IDLE bit */
		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v4_0_soft_reset(void *handle)
{
	/* todo */

	return 0;
}
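
/* Enable or disable the per-instance SDMA trap interrupt by toggling
 * TRAP_ENABLE in the corresponding SDMA CNTL register.
 */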
static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
		sdma_v4_0_get_reg_offset(0, mmSDMA0_CNTL) :
		sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL);

	sdma_cntl = RREG32(reg_offset);
	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
				  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
	WREG32(reg_offset, sdma_cntl);

	return 0;
}

static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: SDMA trap\n");
	switch (entry->client_id) {
	case AMDGPU_IH_CLIENTID_SDMA0:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case AMDGPU_IH_CLIENTID_SDMA1:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

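/* Medium-grain clock gating (MGCG): clearing the SOFT_OVERRIDE bits in
 * the SDMA CLK_CTRL register lets the clock gaters react to idle, which
 * enables MGCG; setting them forces the clocks on.
 */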
static void sdma_v4_0_update_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data, def;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		/* enable sdma0 clock gating */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
		data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);

		if (adev->asic_type == CHIP_VEGA10) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
			data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
		}
	} else {
		/* disable sdma0 clock gating */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
		data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);

		if (adev->asic_type == CHIP_VEGA10) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
			data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
		}
	}
}
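/* Memory light sleep (LS): setting MEM_POWER_OVERRIDE allows the SDMA
 * memories to enter light sleep when idle; clearing it keeps them
 * powered up.
 */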
static void sdma_v4_0_update_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data, def;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		/* 1-not override: enable sdma0 mem light sleep */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
		data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

		/* 1-not override: enable sdma1 mem light sleep */
		if (adev->asic_type == CHIP_VEGA10) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
			data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
		}
	} else {
		/* 0-override: disable sdma0 mem light sleep */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
		data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

		/* 0-override: disable sdma1 mem light sleep */
		if (adev->asic_type == CHIP_VEGA10) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
			data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
		}
	}
}

static int sdma_v4_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		sdma_v4_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v4_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v4_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		sdma_v4_1_update_power_gating(adev,
				state == AMD_PG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
	.name = "sdma_v4_0",
	.early_init = sdma_v4_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v4_0_sw_init,
	.sw_fini = sdma_v4_0_sw_fini,
	.hw_init = sdma_v4_0_hw_init,
	.hw_fini = sdma_v4_0_hw_fini,
	.suspend = sdma_v4_0_suspend,
	.resume = sdma_v4_0_resume,
	.is_idle = sdma_v4_0_is_idle,
	.wait_for_idle = sdma_v4_0_wait_for_idle,
	.soft_reset = sdma_v4_0_soft_reset,
	.set_clockgating_state = sdma_v4_0_set_clockgating_state,
	.set_powergating_state = sdma_v4_0_set_powergating_state,
	.get_clockgating_state = sdma_v4_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = sdma_v4_0_ring_get_rptr,
	.get_wptr = sdma_v4_0_ring_get_wptr,
	.set_wptr = sdma_v4_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v4_0_ring_emit_hdp_flush */
		3 + /* sdma_v4_0_ring_emit_hdp_invalidate */
		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
		18 + /* sdma_v4_0_ring_emit_vm_flush */
		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
	.emit_ib = sdma_v4_0_ring_emit_ib,
	.emit_fence = sdma_v4_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = sdma_v4_0_ring_emit_hdp_invalidate,
	.test_ring = sdma_v4_0_ring_test_ring,
	.test_ib = sdma_v4_0_ring_test_ib,
	.insert_nop = sdma_v4_0_ring_insert_nop,
	.pad_ib = sdma_v4_0_ring_pad_ib,
};

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
	.set = sdma_v4_0_set_trap_irq_state,
	.process = sdma_v4_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
	.process = sdma_v4_0_process_illegal_inst_irq,
};

static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VEGA10).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VEGA10).
 */
static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v4_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v4_0_emit_fill_buffer,
};

static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
	.copy_pte = sdma_v4_0_vm_copy_pte,
	.write_pte = sdma_v4_0_vm_write_pte,
	.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
};

static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v4_0_ip_funcs,
};