/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "iceland_sdma_pkt_open.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");

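/* MMIO register offset for each of the two SDMA engine instances */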
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
        SDMA0_REGISTER_OFFSET,
        SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_iceland_a11[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
        mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
        mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute (ring buffer, IBs, etc.).
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                amdgpu_device_program_register_sequence(adev,
                                                        iceland_mgcg_cgcg_init,
                                                        ARRAY_SIZE(iceland_mgcg_cgcg_init));
                amdgpu_device_program_register_sequence(adev,
                                                        golden_settings_iceland_a11,
                                                        ARRAY_SIZE(golden_settings_iceland_a11));
                break;
        default:
                break;
        }
}

static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                release_firmware(adev->sdma.instance[i].fw);
                adev->sdma.instance[i].fw = NULL;
        }
}

/**
 * sdma_v2_4_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err = 0, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                chip_name = "topaz";
                break;
        default:
                BUG();
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
                err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
                err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
                if (err)
                        goto out;
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
                adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
                adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
                if (adev->sdma.instance[i].feature_version >= 20)
                        adev->sdma.instance[i].burst_nop = true;

                if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
                        info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
                        info->fw = adev->sdma.instance[i].fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
                }
        }

out:
        if (err) {
                pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name);
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        release_firmware(adev->sdma.instance[i].fw);
                        adev->sdma.instance[i].fw = NULL;
                }
        }
        return err;
}

/**
 * sdma_v2_4_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
        /* XXX check if swapping is necessary on BE */
        return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}

/**
 * sdma_v2_4_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;

        return wptr;
}

/**
 * sdma_v2_4_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
}

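/*
 * Insert NOP packets.  When the firmware supports burst NOPs
 * (feature_version >= 20, see sdma_v2_4_init_microcode), a single NOP
 * header carries a count of (count - 1) trailing payload dwords instead
 * of emitting one NOP packet per dword.
 */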
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;

        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        amdgpu_ring_write(ring, ring->funcs->nop |
                                          SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
                                   uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        /* IB packet must end on a 8 DW boundary */
        sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
}

/**
 * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        u32 ref_and_mask = 0;

        if (ring->me == 0)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
        amdgpu_ring_write(ring, ref_and_mask); /* reference */
        amdgpu_ring_write(ring, ref_and_mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                      unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        /* generate an interrupt */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
        amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;

        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_buffer_funcs_status(adev, false);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
}

/**
 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}

/**
 * sdma_v2_4_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!enable) {
                sdma_v2_4_gfx_stop(adev);
                sdma_v2_4_rlc_stop(adev);
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
                else
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
        }
}

/**
 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 wb_offset;
        int i, j, r;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);

                mutex_lock(&adev->srbm_mutex);
                for (j = 0; j < 16; j++) {
                        vi_srbm_select(adev, 0, 0, 0, j);
                        /* SDMA GFX */
                        WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
                        WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
                }
                vi_srbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);

                WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
                       adev->gfx.config.gb_addr_config & 0x70);

                WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                        RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
                       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
                       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

                WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
                WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

                ring->wptr = 0;
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);

                /* enable DMA RB */
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

                ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

                ring->sched.ready = true;
        }

        sdma_v2_4_enable(adev, true);
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }

        return 0;
}

/**
 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
        /* XXX todo */
        return 0;
}

/**
 * sdma_v2_4_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
        int r;

        /* halt the engine before programing */
        sdma_v2_4_enable(adev, false);

        /* start the gfx rings and rlc compute queues */
        r = sdma_v2_4_gfx_resume(adev);
        if (r)
                return r;
        r = sdma_v2_4_rlc_resume(adev);
        if (r)
                return r;

        return 0;
}

/**
 * sdma_v2_4_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a known value to
 * memory and polling until it arrives (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ring_alloc(ring, 5);
        if (r)
                goto error_free_wb;

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

error_free_wb:
        amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        unsigned index;
        u32 tmp = 0;
        u64 gpu_addr;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256,
                          AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;

        ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
        ib.ptr[4] = 0xDEADBEEF;
        ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
        ib.length_dw = 8;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;

err1:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err0:
        amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
                                  uint64_t pe, uint64_t src,
                                  unsigned count)
{
        unsigned bytes = count * 8;

        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
        ib->ptr[ib->length_dw++] = bytes;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
        ib->ptr[ib->length_dw++] = upper_32_bits(src);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
                                   uint64_t value, unsigned count,
                                   uint32_t incr)
{
        unsigned ndw = count * 2;

        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib->ptr[ib->length_dw++] = pe;
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw;
        for (; ndw > 0; ndw -= 2) {
                ib->ptr[ib->length_dw++] = lower_32_bits(value);
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                value += incr;
        }
}

/**
 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
                                     uint64_t addr, unsigned count,
                                     uint32_t incr, uint64_t flags)
{
        /* for physically contiguous pages (vram) */
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
        ib->ptr[ib->length_dw++] = upper_32_bits(flags);
        ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = incr; /* increment size */
        ib->ptr[ib->length_dw++] = 0;
        ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs so it ends on an 8 DW boundary (VI).
 */
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        u32 pad_count;
        int i;

        pad_count = (-ib->length_dw) & 7;
        for (i = 0; i < pad_count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
                                SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
                else
                        ib->ptr[ib->length_dw++] =
                                SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        /* wait for idle */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
                          SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq); /* reference */
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, uint64_t pd_addr)
{
        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0); /* reference */
        amdgpu_ring_write(ring, 0); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

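/*
 * Write @val to @reg from the ring via an SRBM_WRITE packet, so the
 * register update stays ordered with the other packets on the ring.
 */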
static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
                                     uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, val);
}

static int sdma_v2_4_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->sdma.num_instances = SDMA_MAX_INSTANCE;

        sdma_v2_4_set_ring_funcs(adev);
        sdma_v2_4_set_buffer_funcs(adev);
        sdma_v2_4_set_vm_pte_funcs(adev);
        sdma_v2_4_set_irq_funcs(adev);

        return 0;
}

static int sdma_v2_4_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* SDMA trap event */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
                              &adev->sdma.trap_irq);
        if (r)
                return r;

        /* SDMA Privileged inst */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;

        /* SDMA Privileged inst */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;

        r = sdma_v2_4_init_microcode(adev);
        if (r) {
                DRM_ERROR("Failed to load sdma firmware!\n");
                return r;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                ring->ring_obj = NULL;
                ring->use_doorbell = false;
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
                                     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
                                     AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }

        return r;
}

static int sdma_v2_4_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);

        sdma_v2_4_free_microcode(adev);
        return 0;
}

static int sdma_v2_4_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v2_4_init_golden_registers(adev);

        r = sdma_v2_4_start(adev);
        if (r)
                return r;

        return r;
}

static int sdma_v2_4_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v2_4_enable(adev, false);

        return 0;
}

static int sdma_v2_4_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return sdma_v2_4_hw_fini(adev);
}

static int sdma_v2_4_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return sdma_v2_4_hw_init(adev);
}

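/* SRBM_STATUS2 exposes one busy bit per SDMA engine; idle means both clear. */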
static bool sdma_v2_4_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
                   SRBM_STATUS2__SDMA1_BUSY_MASK))
                return false;

        return true;
}

static int sdma_v2_4_wait_for_idle(void *handle)
{
        unsigned i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
                                                SRBM_STATUS2__SDMA1_BUSY_MASK);

                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

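/* Halt any engine that reports busy, then pulse its SRBM soft-reset bit. */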
static int sdma_v2_4_soft_reset(void *handle)
{
        u32 srbm_soft_reset = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
                /* sdma0 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
        }
        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
                /* sdma1 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
        }

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        u32 sdma_cntl;

        switch (type) {
        case AMDGPU_SDMA_IRQ_INSTANCE0:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        case AMDGPU_SDMA_IRQ_INSTANCE1:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
        return 0;
}

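/*
 * The IH ring_id encodes the SDMA instance in bits [1:0] and the queue
 * in bits [3:2]; only the gfx queue (0) carries fences to process.
 */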
static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        u8 instance_id, queue_id;

        instance_id = (entry->ring_id & 0x3) >> 0;
        queue_id = (entry->ring_id & 0xc) >> 2;
        DRM_DEBUG("IH: SDMA trap\n");
        switch (instance_id) {
        case 0:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma.instance[0].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        case 1:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma.instance[1].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        }
        return 0;
}

static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              struct amdgpu_iv_entry *entry)
{
        u8 instance_id, queue_id;

        DRM_ERROR("Illegal instruction in SDMA command stream\n");
        instance_id = (entry->ring_id & 0x3) >> 0;
        queue_id = (entry->ring_id & 0xc) >> 2;

        if (instance_id <= 1 && queue_id == 0)
                drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
        return 0;
}

static int sdma_v2_4_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        /* XXX handled via the smc on VI */
        return 0;
}

static int sdma_v2_4_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        return 0;
}

static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
        .name = "sdma_v2_4",
        .early_init = sdma_v2_4_early_init,
        .late_init = NULL,
        .sw_init = sdma_v2_4_sw_init,
        .sw_fini = sdma_v2_4_sw_fini,
        .hw_init = sdma_v2_4_hw_init,
        .hw_fini = sdma_v2_4_hw_fini,
        .suspend = sdma_v2_4_suspend,
        .resume = sdma_v2_4_resume,
        .is_idle = sdma_v2_4_is_idle,
        .wait_for_idle = sdma_v2_4_wait_for_idle,
        .soft_reset = sdma_v2_4_soft_reset,
        .set_clockgating_state = sdma_v2_4_set_clockgating_state,
        .set_powergating_state = sdma_v2_4_set_powergating_state,
};

static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
        .type = AMDGPU_RING_TYPE_SDMA,
        .align_mask = 0xf,
        .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
        .support_64bit_ptrs = false,
        .secure_submission_supported = true,
        .get_rptr = sdma_v2_4_ring_get_rptr,
        .get_wptr = sdma_v2_4_ring_get_wptr,
        .set_wptr = sdma_v2_4_ring_set_wptr,
        .emit_frame_size =
                6 + /* sdma_v2_4_ring_emit_hdp_flush */
                3 + /* hdp invalidate */
                6 + /* sdma_v2_4_ring_emit_pipeline_sync */
                VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
                10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
        .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
        .emit_ib = sdma_v2_4_ring_emit_ib,
        .emit_fence = sdma_v2_4_ring_emit_fence,
        .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
        .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
        .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
        .test_ring = sdma_v2_4_ring_test_ring,
        .test_ib = sdma_v2_4_ring_test_ib,
        .insert_nop = sdma_v2_4_ring_insert_nop,
        .pad_ib = sdma_v2_4_ring_pad_ib,
        .emit_wreg = sdma_v2_4_ring_emit_wreg,
};

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
                adev->sdma.instance[i].ring.me = i;
        }
}

static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
        .set = sdma_v2_4_set_trap_irq_state,
        .process = sdma_v2_4_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
        .process = sdma_v2_4_process_illegal_inst_irq,
};

static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
        adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
        adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
}

/**
 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: unused
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * necessary and to copy pages for GPU copy operations.
 */
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
                                       uint32_t byte_count,
                                       bool tmz)
{
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
        ib->ptr[ib->length_dw++] = byte_count;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
                                       uint32_t src_data,
                                       uint64_t dst_offset,
                                       uint32_t byte_count)
{
        ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = src_data;
        ib->ptr[ib->length_dw++] = byte_count;
}

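/* Both the copy and fill packets above are 7 dwords and move at most
 * 0x1fffff (2 MiB - 1) bytes per packet, as advertised to TTM below. */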
static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
        .copy_max_bytes = 0x1fffff,
        .copy_num_dw = 7,
        .emit_copy_buffer = sdma_v2_4_emit_copy_buffer,

        .fill_max_bytes = 0x1fffff,
        .fill_num_dw = 7,
        .emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
};

static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
        adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
        .copy_pte_num_dw = 7,
        .copy_pte = sdma_v2_4_vm_copy_pte,

        .write_pte = sdma_v2_4_vm_write_pte,
        .set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};

static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
        unsigned i;

        adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
        for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->vm_manager.vm_pte_scheds[i] =
                        &adev->sdma.instance[i].ring.sched;
        }
        adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 2,
        .minor = 4,
        .rev = 0,
        .funcs = &sdma_v2_4_ip_funcs,
};