/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
#define mmUVD_PG0_CC_UVD_HARVESTING				0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX			1

#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT		0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK		0x00000002L

#define UVD7_MAX_HW_INSTANCES_VEGA20				2
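/*
 * Note: Vega20 carries two UVD instances, either of which may be fused off
 * in production parts.  The harvesting register above is read once per
 * instance in uvd_v7_0_early_init() to build adev->uvd.harvest_config.
 */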

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_uvds[] = {
	SOC15_IH_CLIENTID_UVD,
	SOC15_IH_CLIENTID_UVD1
};
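/*
 * Each UVD instance raises interrupts under its own IH client ID, so the
 * instance index doubles as an index into this table when interrupt
 * sources are hooked up in uvd_v7_0_sw_init().
 */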

/**
 * uvd_v7_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * uvd_v7_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}

/**
 * uvd_v7_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* publish the wptr via the writeback slot and ring the doorbell */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
			     lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
			     lower_32_bits(ring->wptr));
}

/**
 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
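/*
 * The create and destroy messages above share the same session-info and
 * task-info framing; only the final opcode differs (0x08000001 opens a
 * stream, 0x08000002 closes it).  The IB test below round-trips both.
 */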

/**
 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	long r;

	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20) {
		u32 harvest;
		int i;

		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
				adev->uvd.harvest_config |= 1 << i;
			}
		}
		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
						 AMDGPU_UVD_HARVEST_UVD1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->uvd.num_uvd_inst = 1;
	}

	if (amdgpu_sriov_vf(adev))
		adev->uvd.num_enc_rings = 1;
	else
		adev->uvd.num_enc_rings = 2;
	uvd_v7_0_set_ring_funcs(adev);
	uvd_v7_0_set_enc_ring_funcs(adev);
	uvd_v7_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;

	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		/* UVD TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
		if (r)
			return r;

		/* UVD ENC TRAP */
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading UVD firmware\n");
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->uvd.inst[j].ring;
			sprintf(ring->name, "uvd_%d", ring->me);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
			if (amdgpu_sriov_vf(adev)) {
				ring->use_doorbell = true;

				/* currently only use the first encoding ring
				 * for sriov, so set unused locations for the
				 * other unused rings.
				 */
				if (i == 0)
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
				else
					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
			}
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v7_0_sw_fini(void *handle)
{
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v7_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v7_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, r;

	if (amdgpu_sriov_vf(adev))
		r = uvd_v7_0_sriov_start(adev);
	else
		r = uvd_v7_0_start(adev);
	if (r)
		goto done;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		ring = &adev->uvd.inst[j].ring;

		if (!amdgpu_sriov_vf(adev)) {
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			r = amdgpu_ring_alloc(ring, 10);
			if (r) {
				DRM_ERROR("amdgpu: UVD(%d) failed to lock ring (%d).\n", j, r);
				goto done;
			}

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
			amdgpu_ring_write(ring, tmp);
			amdgpu_ring_write(ring, 0xFFFFF);

			/* Clear timeout status bits */
			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_TIMEOUT_STATUS), 0));
			amdgpu_ring_write(ring, 0x8);

			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
				mmUVD_SEMA_CNTL), 0));
			amdgpu_ring_write(ring, 3);

			amdgpu_ring_commit(ring);
		}

		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("UVD and UVD ENC initialized successfully.\n");

	return r;
}

/**
 * uvd_v7_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (!amdgpu_sriov_vf(adev))
		uvd_v7_0_stop(adev);
	else {
		/* full access mode, so don't touch any UVD register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}

static int uvd_v7_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work first, so no power-state-dependent
	 *     jobs get queued behind our back;
	 *   - then make sure the UVD block is clock and power gated (via DPM
	 *     when enabled, otherwise by forcing the PG/CG states directly)
	 *     before the engine itself is halted and its state saved.
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v7_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v7_0_hw_init(adev);
}

/**
 * uvd_v7_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				i == 0 ?
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);

		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
				adev->gfx.config.gb_addr_config);

		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
	}
}

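/*
 * Under SR-IOV the guest driver must not program most UVD registers
 * directly.  Instead, uvd_v7_0_sriov_start() below records the register
 * writes into a shared init table, and uvd_v7_0_mmsch_start() hands that
 * table to the MM scheduler (MMSCH) firmware, which replays it on the
 * driver's behalf and reports completion through a mailbox register.
 */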
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;
	int i;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
		adev->uvd.inst[i].ring_enc[0].wptr = 0;
		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
	}

	/* 5, kick off the initialization and wait until
	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}

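/*
 * Build the MMSCH init table for every active UVD instance.  The table
 * mirrors what uvd_v7_0_mc_resume()/uvd_v7_0_start() would program on bare
 * metal: memory controller offsets, soft reset sequencing, VCPU boot and
 * ring buffer registers, expressed as direct-write, read-modify-write and
 * polling commands for the MMSCH firmware to replay.
 */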
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
	struct mmsch_v1_0_cmd_end end = { {0} };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
	uint8_t i = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
			header->uvd_table_offset = header->header_size;
		else
			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;

		init_table += header->uvd_table_offset;

		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;
			ring = &adev->uvd.inst[i].ring;
			ring->wptr = 0;
			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   0xFFFFFFFF, 0x00000004);
			/* mc resume */
			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
							    mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
				offset = 0;
			} else {
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
				offset = size;
				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
							    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			}

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
			/* mc resume end */

			/* disable clock gating */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);

			/* disable interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);

			/* stall UMC and register bus before resetting VCPU */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

			/* put LMI, VCPU, RBC etc... into reset */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));

			/* initialize UVD memory controller */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
							       UVD_LMI_CTRL__REQ_MODE_MASK |
							       0x00100000L));

			/* take all subblocks out of reset, except VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

			/* enable VCPU clock */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
						    UVD_VCPU_CNTL__CLK_EN_MASK);

			/* enable master interrupt */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

			/* clear the bit 4 of UVD_STATUS */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);

			/* force RBC into idle state */
			size = order_base_2(ring->ring_size);
			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

			ring = &adev->uvd.inst[i].ring_enc[0];
			ring->wptr = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);

			/* boot up the VCPU */
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);

			/* enable UMC */
			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);

			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
		}

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->uvd_table_size = table_size;

	}
	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}

/**
 * uvd_v7_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, k, r;

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		/* disable DPG */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
			 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	}

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v7_0_mc_resume(adev);

	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
		if (adev->uvd.harvest_config & (1 << k))
			continue;
		ring = &adev->uvd.inst[k].ring;
		/* disable clock gating */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
			 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

		/* disable interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put LMI, VCPU, RBC etc... into reset */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
		mdelay(5);

		/* initialize UVD memory controller */
		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__REQ_MODE_MASK |
			0x00100000L);

#ifdef __BIG_ENDIAN
		/* swap (8 in 32) RB and IB */
		lmi_swap_cntl = 0xa;
		mp_swap_cntl = 0;
#endif
		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);

		/* take all subblocks out of reset, except VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* enable VCPU clock */
		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
			     UVD_VCPU_CNTL__CLK_EN_MASK);

		/* enable UMC */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* boot up the VCPU */
		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
		mdelay(10);

		for (i = 0; i < 10; ++i) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
				 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
				 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
			 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
			 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

		/* clear the bit 4 of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);

		/* set the write pointer delay */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);

		/* set the wb address */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
			     (upper_32_bits(ring->gpu_addr) >> 2));

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			     lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			     upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
			     lower_32_bits(ring->wptr));

		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
			 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

		ring = &adev->uvd.inst[k].ring_enc[0];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst[k].ring_enc[1];
		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	return 0;
}

/**
 * uvd_v7_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
	uint8_t i = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		/* force RBC into idle state */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);

		/* Stall UMC and register bus before resetting VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
			 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
		mdelay(1);

		/* put VCPU into reset */
		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
			     UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(5);

		/* disable VCPU clock */
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);

		/* Unstall UMC and register bus */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	}
}

/**
 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v7_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
 *
 * @p: the CS parser with the IBs
 * @ib_idx: which IB to patch
 *
 * Translate the register offsets in the IB from the first UVD instance
 * (which is what user space always addresses) to the instance this ring
 * actually belongs to.
 */
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned i;

	/* No patching necessary for the first instance */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);

		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
		reg += p->adev->reg_offset[UVD_HWIP][1][1];

		amdgpu_set_ib_value(p, ib_idx, i, reg);
	}
	return 0;
}

/**
 * uvd_v7_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}

static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}

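/*
 * After the GMC helper emits the TLB flush, the decode ring must wait for
 * the page-table base register of this vmid to reflect the new value
 * before any following command may run; the GPCOM register-wait sequence
 * emitted by uvd_v7_0_ring_emit_reg_wait() above implements that wait.
 */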
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v7_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
	     AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v7_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	uvd_v7_0_stop(adev);
	return 0;
}

static int uvd_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v7_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v7_0_start(adev);
}
#endif

static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* UVD interrupts are always enabled; nothing to toggle here */
	return 0;
}

static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124: /* UVD system message */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119: /* first encode ring */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120: /* second encode ring, not used under SR-IOV */
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

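/*
 * The clock/power gating helpers below are compiled out: they reference a
 * 'ring' variable that is never declared in their scope, so they do not
 * build as-is and are retained only as a template for a future
 * per-instance gating implementation.
 */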
#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}

static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable Sw gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};