/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
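
/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */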
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
		(adev->asic_type <= CHIP_VEGAM) &&
		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}
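
/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */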
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}
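
/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */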
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}
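
/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */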
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}
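
/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */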
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}
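
/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */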
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
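
/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */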
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
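
/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 * Writes an END command and waits for the read pointer to advance
 */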
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
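
/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */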
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
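
/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */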
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
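
/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */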
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
					      i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP,
					      &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}
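
/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */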
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}
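
/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */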
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->sched.ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}
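
/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */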
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
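
/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */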
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}
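
/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */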
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}
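
/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */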
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
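
/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */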
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}
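
/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */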
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}
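
/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */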
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
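
/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */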
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
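
/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */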
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		 UVD_SUVD_CGC_GATE__SIT_MASK |
		 UVD_SUVD_CGC_GATE__SMP_MASK |
		 UVD_SUVD_CGC_GATE__SCM_MASK |
		 UVD_SUVD_CGC_GATE__SDB_MASK |
		 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__LMI_UMC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK |
			  UVD_CGC_GATE__JPEG2_MASK);

		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 2,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 6,
		.minor = 3,
		.rev = 0,
		.funcs = &uvd_v6_0_ip_funcs,
};