#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

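/**
 * amdgpu_vcn_sw_init - software init for VCN
 *
 * @adev: amdgpu_device pointer
 *
 * Select and load the VCN firmware for the current ASIC, log its version,
 * and allocate the per-instance VCPU, firmware-shared and (when DPG is
 * used) indirect SRAM buffer objects.
 */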
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_ARCTURUS:
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_RENOIR:
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			fw_name = FIRMWARE_RENOIR;
		else
			fw_name = FIRMWARE_GREEN_SARDINE;

		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI12:
		fw_name = FIRMWARE_NAVI12;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_SIENNA_CICHLID:
		fw_name = FIRMWARE_SIENNA_CICHLID;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVY_FLOUNDER:
		fw_name = FIRMWARE_NAVY_FLOUNDER;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_VANGOGH:
		fw_name = FIRMWARE_VANGOGH;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/*
	 * Bits 20-23 of ucode_version hold the encode major version and are
	 * non-zero only for firmware using the new versioning convention.
	 * When they are zero the firmware still uses the legacy
	 * family-ID/major/minor layout, so decode the version accordingly.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
			bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
			bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
						    &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

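/**
 * amdgpu_vcn_sw_fini - software fini for VCN
 *
 * @adev: amdgpu_device pointer
 *
 * Free the buffer objects and rings set up in amdgpu_vcn_sw_init() and
 * release the VCN firmware.
 */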
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

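/**
 * amdgpu_vcn_suspend - save the VCN VCPU buffer objects
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and copy the contents of each instance's VCPU BO
 * to system memory so it can be restored on resume.
 */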
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
	}
	return 0;
}

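/**
 * amdgpu_vcn_resume - restore the VCN VCPU buffer objects
 *
 * @adev: amdgpu_device pointer
 *
 * Restore each VCPU BO from the copy saved at suspend time, or reload the
 * firmware image and clear the rest of the BO when no saved copy exists.
 */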
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

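/**
 * amdgpu_vcn_idle_work_handler - idle handler for the VCN block
 *
 * @work: the delayed work item
 *
 * Count the fences still outstanding on the decode and encode rings,
 * update the DPG pause state accordingly, and power gate the block when
 * everything is idle; otherwise re-arm the delayed work.
 */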
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;
	int r = 0;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
			    unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_gfx_off_ctrl(adev, true);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

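/**
 * amdgpu_vcn_ring_begin_use - power up VCN before a submission
 *
 * @ring: ring the submission will be made on
 *
 * Bump the submission count, disable GFXOFF and switch to the video power
 * profile if the block was idle, ungate VCN and update the DPG pause state.
 */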
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r = 0;

	atomic_inc(&adev->vcn.total_submission_cnt);

	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
		amdgpu_gfx_off_ctrl(adev, false);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
					       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

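/**
 * amdgpu_vcn_ring_end_use - mark the end of a submission
 *
 * @ring: ring the submission was made on
 *
 * Drop the submission counts taken in amdgpu_vcn_ring_begin_use() and
 * schedule the idle work.
 */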
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

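/**
 * amdgpu_vcn_dec_ring_test_ring - test the decode ring
 *
 * @ring: ring to test
 *
 * Write a known value to the scratch register through the ring and poll
 * until it reads back, to make sure the ring is working.
 */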
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

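/**
 * amdgpu_vcn_dec_sw_ring_test_ring - test the software decode ring
 *
 * @ring: ring to test
 *
 * Submit an END command and wait for the read pointer to move to make
 * sure the ring is processed by the firmware.
 */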
int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

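/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message to the VCN
 *
 * @ring: decode ring to submit on
 * @bo: buffer object containing the message
 * @fence: optional fence returned for the submission
 *
 * Build a small IB that points the decoder at the message BO, submit it
 * directly and free the message BO once the job is fenced.
 */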
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	void *msg = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	msg = amdgpu_bo_kptr(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
	return r;
}

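/**
 * amdgpu_vcn_dec_get_create_msg - build a decoder create message
 *
 * @ring: ring the message will be sent on
 * @handle: session handle to use
 * @bo: returned BO holding the message
 *
 * Allocate and fill a message BO asking the decoder to create a session.
 */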
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo **bo)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	*bo = NULL;
	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

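/**
 * amdgpu_vcn_dec_get_destroy_msg - build a decoder destroy message
 *
 * @ring: ring the message will be sent on
 * @handle: session handle to destroy
 * @bo: returned BO holding the message
 *
 * Allocate and fill a message BO asking the decoder to destroy a session.
 */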
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo **bo)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	*bo = NULL;
	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

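/**
 * amdgpu_vcn_dec_ring_test_ib - test an IB on the decode ring
 *
 * @ring: ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a create and a destroy message and wait for the resulting fence
 * to signal.
 */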
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, bo, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

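/**
 * amdgpu_vcn_dec_sw_send_msg - submit a decoder message on the software ring
 *
 * @ring: software decode ring to submit on
 * @bo: buffer object containing the message
 * @fence: optional fence returned for the submission
 *
 * Build an IB carrying a decode buffer that points at the message BO and
 * submit it directly.
 */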
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_bo *bo,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	const unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->length_dw = 0;

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

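/**
 * amdgpu_vcn_dec_sw_ring_test_ib - test an IB on the software decode ring
 *
 * @ring: ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a create and a destroy message over the software ring and wait for
 * the resulting fence to signal.
 */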
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

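/**
 * amdgpu_vcn_enc_ring_test_ring - test the encode ring
 *
 * @ring: ring to test
 *
 * Submit an END command and wait for the read pointer to move to make
 * sure the ring is working.
 */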
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

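/**
 * amdgpu_vcn_enc_get_create_msg - build and submit an encoder create message
 *
 * @ring: encode ring to submit on
 * @handle: session handle to use
 * @bo: session buffer object referenced by the message
 * @fence: optional fence returned for the submission
 *
 * Build an IB that opens an encode session and submit it directly.
 */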
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

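/**
 * amdgpu_vcn_enc_get_destroy_msg - build and submit an encoder destroy message
 *
 * @ring: encode ring to submit on
 * @handle: session handle to destroy
 * @bo: session buffer object referenced by the message
 * @fence: optional fence returned for the submission
 *
 * Build an IB that closes an encode session and submit it directly.
 */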
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

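/**
 * amdgpu_vcn_enc_ring_test_ib - test an IB on the encode ring
 *
 * @ring: ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Open and close an encode session and wait for the resulting fence to
 * signal.
 */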
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_free_kernel(&bo, NULL, NULL);

	return r;
}
