/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");


static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_tonga_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_fiji_a10[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 cz_golden_settings_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 stoney_golden_settings_a11[] =
{
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};

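/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 rings for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */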
static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris11_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_polaris10_a11,
						 (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 cz_golden_settings_a11,
						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 stoney_golden_settings_a11,
						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
		break;
	default:
		break;
	}
}

static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

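/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */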
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

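/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */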
static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}

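/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */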
static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
	}

	return wptr;
}

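/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */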
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2;
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
	}
}

static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

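/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VI).
 */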
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib,
				   unsigned vm_id, bool ctx_switch)
{
	u32 vmid = vm_id & 0xf;

	/* IB packet must end on a 8 DW boundary */
	sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

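/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */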
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}

static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmHDP_DEBUG0);
	amdgpu_ring_write(ring, 1);
}

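/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and to generate an interrupt
 * if needed (VI).
 */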
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

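/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */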
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

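/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */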
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

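/**
 * sdma_v3_0_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */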
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 1);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 0);
		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
	}
}

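/**
 * sdma_v3_0_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */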
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v3_0_gfx_stop(adev);
		sdma_v3_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

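/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */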
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		amdgpu_ring_clear_ring(ring);
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		sdma_v3_0_ring_set_wptr(ring);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
						 OFFSET, ring->doorbell_index);
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

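/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */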
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

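/**
 * sdma_v3_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */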
static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v3_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

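/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */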
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	int r, i;

	if (!adev->pp_enabled) {
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
			r = sdma_v3_0_load_microcode(adev);
			if (r)
				return r;
		} else {
			for (i = 0; i < adev->sdma.num_instances; i++) {
				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
										 (i == 0) ?
										 AMDGPU_UCODE_ID_SDMA0 :
										 AMDGPU_UCODE_ID_SDMA1);
				if (r)
					return -EINVAL;
			}
		}
	}

	/* disable sdma engine before programing it */
	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v3_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v3_0_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

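/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by writing using it to write an
 * value to memory. (VI).
 * Returns 0 for success, error for failure.
 */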
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

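/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */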
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

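/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */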
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

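/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */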
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

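/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */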
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

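/**
 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 *
 */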
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

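/**
 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */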
static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

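/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VMID of the flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */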
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static int sdma_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}

	sdma_v3_0_set_ring_funcs(adev);
	sdma_v3_0_set_buffer_funcs(adev);
	sdma_v3_0_set_vm_pte_funcs(adev);
	sdma_v3_0_set_irq_funcs(adev);

	return 0;
}

static int sdma_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v3_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = (i == 0) ?
			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v3_0_free_microcode(adev);
	return 0;
}

static int sdma_v3_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_init_golden_registers(adev);

	r = sdma_v3_0_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	return 0;
}

static int sdma_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_fini(adev);
}

static int sdma_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_init(adev);
}

static bool sdma_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool sdma_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
	    (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		adev->sdma.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->sdma.srbm_soft_reset = 0;
		return false;
	}
}

static int sdma_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_ctx_switch_enable(adev, false);
		sdma_v3_0_enable(adev, false);
	}

	return 0;
}

static int sdma_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_gfx_resume(adev);
		sdma_v3_0_rlc_resume(adev);
	}

	return 0;
}

static int sdma_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (data != temp)
				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
			data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;

			if (data != temp)
				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
		}
	}
}

static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	}
}

static int sdma_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.name = "sdma_v3_0",
	.early_init = sdma_v3_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v3_0_sw_init,
	.sw_fini = sdma_v3_0_sw_fini,
	.hw_init = sdma_v3_0_hw_init,
	.hw_fini = sdma_v3_0_hw_fini,
	.suspend = sdma_v3_0_suspend,
	.resume = sdma_v3_0_resume,
	.is_idle = sdma_v3_0_is_idle,
	.wait_for_idle = sdma_v3_0_wait_for_idle,
	.check_soft_reset = sdma_v3_0_check_soft_reset,
	.pre_soft_reset = sdma_v3_0_pre_soft_reset,
	.post_soft_reset = sdma_v3_0_post_soft_reset,
	.soft_reset = sdma_v3_0_soft_reset,
	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
	.set_powergating_state = sdma_v3_0_set_powergating_state,
	.get_clockgating_state = sdma_v3_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = false,
	.get_rptr = sdma_v3_0_ring_get_rptr,
	.get_wptr = sdma_v3_0_ring_get_wptr,
	.set_wptr = sdma_v3_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v3_0_ring_emit_hdp_flush */
		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
		12 + /* sdma_v3_0_ring_emit_vm_flush */
		10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
	.emit_ib = sdma_v3_0_ring_emit_ib,
	.emit_fence = sdma_v3_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
	.test_ring = sdma_v3_0_ring_test_ring,
	.test_ib = sdma_v3_0_ring_test_ib,
	.insert_nop = sdma_v3_0_ring_insert_nop,
	.pad_ib = sdma_v3_0_ring_pad_ib,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
	.set = sdma_v3_0_set_trap_irq_state,
	.process = sdma_v3_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
	.process = sdma_v3_0_process_illegal_inst_irq,
};

static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}

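/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */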
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

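/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */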
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};

static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
	.copy_pte = sdma_v3_0_vm_copy_pte,
	.write_pte = sdma_v3_0_vm_write_pte,
	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};

static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};