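/*
 * uvd_v7_0.c - UVD 7.0 (Unified Video Decoder) IP block support for amdgpu
 * on SOC15-based ASICs, covering the decode and encode rings, firmware
 * memory-controller programming, SR-IOV (MMSCH) bring-up and interrupt
 * handling. (Descriptive header added for orientation; the original file
 * header was not preserved in this copy.)
 */
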
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

#define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1

#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20 2

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};

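/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */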
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

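/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */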
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

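/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */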
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

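/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */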
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

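/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */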
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

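/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */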
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

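/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 * Writes an END command and checks that the ring's read pointer advances.
 */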
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

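/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object where the message is placed
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */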
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

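/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object where the message is placed
 * @fence: optional fence to return
 *
 * Close up a stream for HW test
 */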
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

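/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Submits a create and a destroy message and waits for the fence.
 */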
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only use the first encoding ring for
				 * sriov, so set unused location for other unused rings.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

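/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */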
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: UVD(%d) failed to lock ring (%d).\n", j, r);
				goto done;
			}

			/* program the semaphore wait/signal timeouts */
			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

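/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */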
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}

static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

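/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */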
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}
	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;

	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

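/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */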
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
				~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__REQ_MODE_MASK |
			0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
				UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
				(upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
				lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
				upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	return 0;
}

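/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */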
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}

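/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */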
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}

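/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */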
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

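/**
 * uvd_v7_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */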
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

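/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */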
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

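/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @ib_idx: which IB to patch
 *
 * Rewrites register offsets so an IB written against instance 0 also
 * works on the second UVD instance.
 */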
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_set_ib_value(p, ib_idx, i, reg);
	}
	return 0;
}

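/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */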
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

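/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */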
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
		    AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif

static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119:
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120:
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}

static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable Sw gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};