Lines matching refs:vcn

Cross-reference listing for the adev->vcn state in the amdgpu driver
(drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c): the number on the left is the
line in that file, and the trailing "in ..." names the containing function.

82 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); in amdgpu_vcn_sw_init()
83 mutex_init(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_init()
84 mutex_init(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_init()
85 atomic_set(&adev->vcn.total_submission_cnt, 0); in amdgpu_vcn_sw_init()
86 for (i = 0; i < adev->vcn.num_vcn_inst; i++) in amdgpu_vcn_sw_init()
87 atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); in amdgpu_vcn_sw_init()
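
The first group of matches (file lines 82-87) is the head of amdgpu_vcn_sw_init(): it wires up the delayed idle worker plus the locks and counters that the power-gating paths further down rely on. An annotated sketch of that setup, assuming only the struct fields visible in the matches:

    /* Condensed sketch of file lines 82-87, not a verbatim copy. */

    /* Deferred work that power-gates VCN once the rings go idle. */
    INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

    /* vcn_pg_lock serializes power-state transitions; the second mutex
     * covers the VCN1/JPEG1 hardware-workaround path. */
    mutex_init(&adev->vcn.vcn_pg_lock);
    mutex_init(&adev->vcn.vcn1_jpeg1_workaround);

    /* Submission counters the idle worker checks before gating power. */
    atomic_set(&adev->vcn.total_submission_cnt, 0);
    for (i = 0; i < adev->vcn.num_vcn_inst; i++)
        atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
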
103 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
113 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
119 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
125 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
134 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
145 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
154 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
160 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
166 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
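
File lines 103-166 all set the same flag from different arms of the per-ASIC switch in amdgpu_vcn_sw_init(): each chip that supports DPG (dynamic power gating) opts into loading its SRAM image indirectly. A hedged reconstruction of one arm; the case label and firmware macro are illustrative, not taken from the matches:

    switch (adev->asic_type) {
    case CHIP_NAVI10:               /* illustrative; the real switch has one arm per ASIC */
        fw_name = FIRMWARE_NAVI10;  /* macro name assumed for this sketch */
        if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
            (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
            adev->vcn.indirect_sram = true; /* the line refs 103-166 match */
        break;
    /* further ASIC-specific arms elided */
    default:
        return -EINVAL;
    }
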
172 r = request_firmware(&adev->vcn.fw, fw_name, adev->dev); in amdgpu_vcn_sw_init()
179 r = amdgpu_ucode_validate(adev->vcn.fw); in amdgpu_vcn_sw_init()
183 release_firmware(adev->vcn.fw); in amdgpu_vcn_sw_init()
184 adev->vcn.fw = NULL; in amdgpu_vcn_sw_init()
188 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_sw_init()
189 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_vcn_sw_init()
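
File lines 172-189 are the firmware-loading step: fetch the named blob, validate it, and read the version out of the common header. The matcher hides the error paths; a sketch that restores them, assuming conventional amdgpu error handling:

    r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
    if (r) {
        dev_err(adev->dev, "amdgpu_vcn: can't load firmware \"%s\"\n", fw_name);
        return r;
    }

    r = amdgpu_ucode_validate(adev->vcn.fw);
    if (r) {
        dev_err(adev->dev, "amdgpu_vcn: can't validate firmware \"%s\"\n", fw_name);
        release_firmware(adev->vcn.fw);   /* lines 183-184: drop the bad blob */
        adev->vcn.fw = NULL;
        return r;
    }

    hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
    adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
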
223 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_sw_init()
224 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_sw_init()
228 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo, in amdgpu_vcn_sw_init()
229 &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr); in amdgpu_vcn_sw_init()
235 adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr + in amdgpu_vcn_sw_init()
237 adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr + in amdgpu_vcn_sw_init()
240 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_init()
242 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo, in amdgpu_vcn_sw_init()
243 &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr); in amdgpu_vcn_sw_init()
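
File lines 223-243 allocate the per-instance VCPU buffer object, carve the firmware-shared region out of its tail, and, when indirect_sram was set above, allocate a second BO for the DPG scratch image. A condensed sketch; bo_size, fw_shared_size, and dpg_sram_size stand in for the driver's real size math:

    for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;   /* skip instances fused off on this die */

        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->vcn.inst[i].vcpu_bo,
                                    &adev->vcn.inst[i].gpu_addr,
                                    &adev->vcn.inst[i].cpu_addr);
        if (r)
            return r;

        /* The firmware-shared area sits at the end of the VCPU BO, so the
         * CPU and GPU views get the same tail offset. */
        adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
            bo_size - fw_shared_size;
        adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
            bo_size - fw_shared_size;

        if (adev->vcn.indirect_sram) {
            r = amdgpu_bo_create_kernel(adev, dpg_sram_size, PAGE_SIZE,
                                        AMDGPU_GEM_DOMAIN_VRAM,
                                        &adev->vcn.inst[i].dpg_sram_bo,
                                        &adev->vcn.inst[i].dpg_sram_gpu_addr,
                                        &adev->vcn.inst[i].dpg_sram_cpu_addr);
            if (r)
                return r;
        }
    }
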
258 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_sw_fini()
259 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_sw_fini()
262 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_fini()
263 amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo, in amdgpu_vcn_sw_fini()
264 &adev->vcn.inst[j].dpg_sram_gpu_addr, in amdgpu_vcn_sw_fini()
265 (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); in amdgpu_vcn_sw_fini()
267 kvfree(adev->vcn.inst[j].saved_bo); in amdgpu_vcn_sw_fini()
269 amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, in amdgpu_vcn_sw_fini()
270 &adev->vcn.inst[j].gpu_addr, in amdgpu_vcn_sw_fini()
271 (void **)&adev->vcn.inst[j].cpu_addr); in amdgpu_vcn_sw_fini()
273 amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_sw_fini()
275 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_sw_fini()
276 amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_sw_fini()
279 release_firmware(adev->vcn.fw); in amdgpu_vcn_sw_fini()
280 mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_fini()
281 mutex_destroy(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_fini()
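
amdgpu_vcn_sw_fini() (file lines 258-281) undoes the init in reverse: per instance it frees the optional DPG SRAM BO, any suspend snapshot, the VCPU BO, and the rings, then releases the firmware and destroys the mutexes. Assuming the usual amdgpu_bo_free_kernel() behavior (it returns early when the BO pointer is already NULL and clears the pointer and both address cookies on free), the teardown is safe to reach with partially initialized instances:

    if (adev->vcn.indirect_sram)
        amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
                              &adev->vcn.inst[j].dpg_sram_gpu_addr,
                              (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);

    kvfree(adev->vcn.inst[j].saved_bo);   /* NULL-safe, like kfree() */

    amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
                          &adev->vcn.inst[j].gpu_addr,
                          (void **)&adev->vcn.inst[j].cpu_addr);

    amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
    for (i = 0; i < adev->vcn.num_enc_rings; ++i)
        amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
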
315 cancel_delayed_work_sync(&adev->vcn.idle_work); in amdgpu_vcn_suspend()
317 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_suspend()
318 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_suspend()
320 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_suspend()
323 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_suspend()
324 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_suspend()
326 adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_vcn_suspend()
327 if (!adev->vcn.inst[i].saved_bo) in amdgpu_vcn_suspend()
331 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); in amdgpu_vcn_suspend()
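
amdgpu_vcn_suspend() (file lines 315-331) first cancels the idle worker so no power-gating races the save, then snapshots each instance's VCPU BO into a kvmalloc'd buffer; memcpy_fromio() is used because the VRAM mapping is I/O memory from the CPU's point of view. A sketch of the whole function as the matches imply it:

    int amdgpu_vcn_suspend(struct amdgpu_device *adev)
    {
        unsigned int size, i;
        void *ptr;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
            if (adev->vcn.harvest_config & (1 << i))
                continue;
            if (adev->vcn.inst[i].vcpu_bo == NULL)
                continue;

            size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
            ptr = adev->vcn.inst[i].cpu_addr;

            adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
            if (!adev->vcn.inst[i].saved_bo)
                return -ENOMEM;

            memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
        }
        return 0;
    }
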
344 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_resume()
345 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_resume()
347 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_resume()
350 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_resume()
351 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_resume()
353 if (adev->vcn.inst[i].saved_bo != NULL) { in amdgpu_vcn_resume()
355 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); in amdgpu_vcn_resume()
358 kvfree(adev->vcn.inst[i].saved_bo); in amdgpu_vcn_resume()
359 adev->vcn.inst[i].saved_bo = NULL; in amdgpu_vcn_resume()
364 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_resume()
368 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, in amdgpu_vcn_resume()
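
The resume path (file lines 344-368) is the mirror image: if a suspend snapshot exists it is copied back and freed; otherwise the VCPU region is re-seeded from the raw firmware image, skipping the common header. The load_type guard below is an assumption restored from context, since the matcher only shows the vcn-prefixed lines:

    if (adev->vcn.inst[i].saved_bo != NULL) {
        memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
        kvfree(adev->vcn.inst[i].saved_bo);
        adev->vcn.inst[i].saved_bo = NULL;
    } else {
        const struct common_firmware_header *hdr;
        unsigned int offset;

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
            offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
            memcpy_toio(adev->vcn.inst[i].cpu_addr,
                        adev->vcn.fw->data + offset,
                        le32_to_cpu(hdr->ucode_size_bytes));
        }
    }
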
384 container_of(work, struct amdgpu_device, vcn.idle_work.work); in amdgpu_vcn_idle_work_handler()
389 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_idle_work_handler()
390 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_idle_work_handler()
393 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { in amdgpu_vcn_idle_work_handler()
394 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_idle_work_handler()
401 unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt))) in amdgpu_vcn_idle_work_handler()
406 adev->vcn.pause_dpg_mode(adev, j, &new_state); in amdgpu_vcn_idle_work_handler()
409 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_idle_work_handler()
413 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { in amdgpu_vcn_idle_work_handler()
421 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_idle_work_handler()
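
amdgpu_vcn_idle_work_handler() (file lines 384-421) recovers the device from the work_struct, sums the fences still outstanding on every decode and encode ring, and powers the block down only when both that sum and total_submission_cnt are zero; otherwise it re-arms itself. A sketch of that decision, with the DPG pause handling of lines 401-406 elided; the power-gating call shown is the conventional amdgpu one, assumed from context:

    static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
    {
        struct amdgpu_device *adev =
            container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
        unsigned int i, j;

        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
            if (adev->vcn.harvest_config & (1 << j))
                continue;

            for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);

            /* DPG pause-state update elided (file lines 401-406) */

            fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
            fences += fence[j];
        }

        if (!fences && !atomic_read(&adev->vcn.total_submission_cnt))
            amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                                   AMD_PG_STATE_GATE);
        else
            schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
    }
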
430 atomic_inc(&adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_begin_use()
432 if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) { in amdgpu_vcn_ring_begin_use()
439 mutex_lock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
447 atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_begin_use()
453 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_ring_begin_use()
454 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); in amdgpu_vcn_ring_begin_use()
456 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt)) in amdgpu_vcn_ring_begin_use()
462 adev->vcn.pause_dpg_mode(adev, ring->me, &new_state); in amdgpu_vcn_ring_begin_use()
464 mutex_unlock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
471 atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_end_use()
473 atomic_dec(&ring->adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_end_use()
475 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_ring_end_use()
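
begin_use/end_use bracket every submission (file lines 430-475). begin_use bumps the global counter before cancelling the idle worker, so the worker can never see a zero count while a job is in flight; end_use drops the counters and re-arms the worker. A sketch of the pairing, with the ungate and per-instance DPG logic under vcn_pg_lock elided:

    void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
    {
        struct amdgpu_device *adev = ring->adev;

        atomic_inc(&adev->vcn.total_submission_cnt);   /* before the cancel */

        if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
            /* worker was not pending: the block may already be gated,
             * so the driver ungates it here (elided) */
        }

        mutex_lock(&adev->vcn.vcn_pg_lock);
        /* power ungate + DPG bookkeeping elided (file lines 447-462) */
        mutex_unlock(&adev->vcn.vcn_pg_lock);
    }

    void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
    {
        struct amdgpu_device *adev = ring->adev;

        /* In the driver the enc-ring counter drop (file line 471) is
         * guarded by ring type and DPG mode; the guard is elided here. */
        atomic_dec(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
        atomic_dec(&adev->vcn.total_submission_cnt);

        schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
    }
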
489 WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); in amdgpu_vcn_dec_ring_test_ring()
493 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0)); in amdgpu_vcn_dec_ring_test_ring()
497 tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9); in amdgpu_vcn_dec_ring_test_ring()
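
The decode ring test (file lines 489-497) round-trips a magic value through scratch register 9: write 0xCAFEDEAD via MMIO, ask the ring to overwrite it with a register-write packet, and poll until the new value lands. A sketch restoring the polling loop the matcher hides; 0xDEADBEEF as the ring-written value is an assumption from the usual amdgpu test pattern:

    WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
    r = amdgpu_ring_alloc(ring, 3);
    if (r)
        return r;

    amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
    amdgpu_ring_write(ring, 0xDEADBEEF);   /* assumed test value */
    amdgpu_ring_commit(ring);

    for (i = 0; i < adev->usec_timeout; i++) {
        tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
        if (tmp == 0xDEADBEEF)
            break;
        udelay(1);
    }
    if (i >= adev->usec_timeout)
        r = -ETIMEDOUT;
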
557 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); in amdgpu_vcn_dec_send_msg()
559 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); in amdgpu_vcn_dec_send_msg()
561 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0); in amdgpu_vcn_dec_send_msg()
564 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0); in amdgpu_vcn_dec_send_msg()
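
amdgpu_vcn_dec_send_msg() (file lines 557-564) builds the IB by hand: data0/data1 carry the low and high halves of the message address, cmd triggers the fetch, and the rest is padded with NOP register writes. A sketch of the packet layout; addr and the 16-dword length are assumptions inferred from the even/odd ptr indices visible in the matches:

    ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
    ib->ptr[1] = lower_32_bits(addr);   /* message GPU address, assumed */
    ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
    ib->ptr[3] = upper_32_bits(addr);
    ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
    ib->ptr[5] = 0;
    for (i = 6; i < 16; i += 2) {
        ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
        ib->ptr[i + 1] = 0;
    }
    ib->length_dw = 16;
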
962 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_setup_ucode()
964 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_setup_ucode()
965 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_setup_ucode()
974 adev->firmware.ucode[idx].fw = adev->vcn.fw; in amdgpu_vcn_setup_ucode()
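
amdgpu_vcn_setup_ucode() (file lines 962-974) registers the same firmware blob once per surviving instance in the device's ucode table so PSP can load each VCN engine. A sketch; the AMDGPU_UCODE_ID_VCN + i index scheme and the size accounting are assumptions restored from context:

    void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
    {
        const struct common_firmware_header *hdr;
        unsigned int i;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
            return;

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;

        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
            unsigned int idx = AMDGPU_UCODE_ID_VCN + i;   /* assumed indexing */

            if (adev->vcn.harvest_config & (1 << i))
                continue;

            adev->firmware.ucode[idx].ucode_id = idx;
            adev->firmware.ucode[idx].fw = adev->vcn.fw;
            adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
        }
    }
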