#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f

#define VCN25_MAX_HW_INSTANCES_ARCTURUS				2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set vcn block init status
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS) {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}

		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else
		adev->vcn.num_vcn_inst = 1;

	adev->vcn.num_enc_rings = 2;

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_jpeg_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN JPEG TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
			VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

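	/*
	 * With PSP-based firmware loading, register the VCN ucode (one copy
	 * per hardware instance) so it is accounted for in the firmware list.
	 */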
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);

		adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH);

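		/*
		 * Each instance gets a block of 8 doorbells:
		 * offset 0 = decode ring, 1 = JPEG ring, 2..3 = encode rings.
		 */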
		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
			if (r)
				return r;
		}

		ring = &adev->vcn.inst[j].ring_jpeg;
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
		sprintf(ring->name, "vcn_jpeg_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		ring = &adev->vcn.inst[j].ring_dec;

		adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, j);

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->sched.ready = false;
			goto done;
		}

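		/* encode rings are not tested here; mark them as not ready */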
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->sched.ready = false;
			continue;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->sched.ready = false;
				goto done;
			}
		}

		ring = &adev->vcn.inst[j].ring_jpeg;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->sched.ready = false;
			goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_dec;

		if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		ring->sched.ready = false;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = false;
		}

		ring = &adev->vcn.inst[i].ring_jpeg;
		ring->sched.ready = false;
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
	}
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

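		/* wait until all UVD_CGC_GATE bits read back as zero */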
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

/**
 * jpeg_v2_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_jpeg;
		/* disable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* JPEG disable CGC */
		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
		tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
		tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
			| JPEG_CGC_GATE__JPEG2_DEC_MASK
			| JPEG_CGC_GATE__JMCIF_MASK
			| JPEG_CGC_GATE__JRBBM_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
		tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
			| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
			| JPEG_CGC_CTRL__JMCIF_MODE_MASK
			| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

		/* MJPEG global tiling registers */
		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable System Interrupt for JRBC */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN),
			JPEG_SYS_INT_EN__DJRBC_MASK,
			~JPEG_SYS_INT_EN__DJRBC_MASK);

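		/* program the JRBC ring buffer address, size and pointers */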
		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
	}

	return 0;
}

/**
 * jpeg_v2_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the JPEG block
 */
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
		tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
			|JPEG_CGC_GATE__JPEG2_DEC_MASK
			|JPEG_CGC_GATE__JMCIF_MASK
			|JPEG_CGC_GATE__JRBBM_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

		/* enable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS),
			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
	}

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
	}

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

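		/*
		 * Poll UVD_STATUS until the VCPU reports ready, retrying up to
		 * 10 times with a VCPU block reset between attempts.
		 */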
		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of UVD_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}
	r = jpeg_v2_5_start(adev);

	return r;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r;

	r = jpeg_v2_5_stop(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* wait for vcn idle */
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

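		/* wait for the UMC read/write channels to report clean */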
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_jpeg_ring_get_rptr - get JPEG read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware JPEG read pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v2_5_jpeg_ring_get_wptr - get JPEG write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware JPEG write pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v2_5_jpeg_ring_set_wptr - set JPEG write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the JPEG write pointer to the hardware
 */
static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_jpeg_ring_get_rptr,
	.get_wptr = vcn_v2_5_jpeg_ring_get_wptr,
	.set_wptr = vcn_v2_5_jpeg_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
		18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v2_0_jpeg_ring_nop,
	.insert_start = vcn_v2_0_jpeg_ring_insert_start,
	.insert_end = vcn_v2_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
		adev->vcn.inst[i].ring_jpeg.me = i;
		DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE, ret);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
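		/* one fence interrupt per encode ring, plus one each for decode and JPEG */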
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};