/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
				      &adev->vcn.irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
		if (r)
			return r;
	}

	return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int i, r;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			goto done;
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;

	if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
		vcn_v1_0_stop(adev);

	ring->ready = false;

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}

/**
 * vcn_v1_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know it's offsets
 */
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

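	/*
	 * The VCPU sees three cache windows laid out back to back from
	 * vcn.gpu_addr: window 0 holds the firmware image, window 1 the
	 * heap, and window 2 the stack plus per-session context.
	 */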
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr + size));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr + size));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
			AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
}

/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on the SUVD sub-block gates */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

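	/*
	 * Bring the VCN tiles out of static power gating through the PG
	 * finite state machine: write a per-tile request to
	 * UVD_PGFSM_CONFIG, then poll UVD_PGFSM_STATUS until the hardware
	 * reports the requested state (the per-tile field encodings are
	 * hardware-defined).
	 */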
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

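		/*
		 * Request the power-gated state for every tile (PGFSM
		 * config value 2 per field), then wait until
		 * UVD_PGFSM_STATUS mirrors the same value for each tile
		 * before considering the block gated.
		 */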
		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
	}
}

/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_v1_0_mc_resume(adev);

	vcn_1_0_disable_static_power_gating(adev);
	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* initialize VCN memory controller */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		0x00100000L);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
			UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	/* boot up the VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
	mdelay(10);

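	/*
	 * Poll UVD_STATUS until the VCPU reports a good boot (bit 1 set).
	 * If it never does, toggle the VCPU soft reset and retry; give up
	 * after ten attempts.
	 */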
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

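	/*
	 * Force RBC into an idle state while the decode ring is
	 * programmed: disable fetch and write-pointer polling, and let
	 * the driver write the read pointer directly.
	 */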
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
			(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for the ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	/* re-enable fetch now that the ring is set up */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

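	/* program the two encode rings: initial pointers, base address and size */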
	ring = &adev->vcn.ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	return 0;
}

/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put VCPU into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);

	/* unstall UMC and register bus */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);
	return 0;
}

static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
}

static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);

	return ret;
}

static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (enable) {
		/* only gate the clocks once the block is idle */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

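	/*
	 * Hand the sequence number and fence address to the VCPU through
	 * the GPCOM registers, then emit a trap so an interrupt is raised
	 * once the fence write has completed.
	 */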
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

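	/*
	 * Ask the VCPU to poll @reg until its masked value matches @val:
	 * data0 carries the register offset, data1 the expected value and
	 * GP_SCRATCH8 the mask.
	 */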
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}

static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

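	/* wait for the register write of the page table base to land */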
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}

/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

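	/* wait for the page table base write to be visible before continuing */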
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

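	/* source ids match the ones registered in sw_init: 124 is the
	 * decode ring trap, 119 and 120 the two encode ring traps */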
	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case 119:
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case 120:
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	/* each decode NOP is two dwords: a NO_OP register write plus its value */
	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		return vcn_v1_0_stop(adev);
	else
		return vcn_v1_0_start(adev);
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCN,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &vcn_v1_0_ip_funcs,
};