Lines matching refs: uvd

154 if (adev->uvd.address_64_bit) in amdgpu_uvd_create_msg_bo_helper()
191 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); in amdgpu_uvd_sw_init()
262 r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); in amdgpu_uvd_sw_init()
269 r = amdgpu_ucode_validate(adev->uvd.fw); in amdgpu_uvd_sw_init()
273 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_init()
274 adev->uvd.fw = NULL; in amdgpu_uvd_sw_init()
279 adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES; in amdgpu_uvd_sw_init()
281 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_sw_init()
300 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
302 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | in amdgpu_uvd_sw_init()
307 (adev->uvd.fw_version < FW_1_66_16)) in amdgpu_uvd_sw_init()
319 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
321 adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_uvd_sw_init()
325 + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; in amdgpu_uvd_sw_init()
329 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in amdgpu_uvd_sw_init()
330 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_init()
333 AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_init()
334 &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_init()
341 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_sw_init()
342 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_sw_init()
343 adev->uvd.filp[i] = NULL; in amdgpu_uvd_sw_init()
348 adev->uvd.address_64_bit = true; in amdgpu_uvd_sw_init()
350 r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo); in amdgpu_uvd_sw_init()
356 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; in amdgpu_uvd_sw_init()
359 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; in amdgpu_uvd_sw_init()
362 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; in amdgpu_uvd_sw_init()
365 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15; in amdgpu_uvd_sw_init()
368 adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; in amdgpu_uvd_sw_init()
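
The gating above follows a simple pattern: line 302 packs the firmware's major and minor numbers into the upper bytes of a 32-bit fw_version, and lines 356-368 compare that value against per-ASIC minimum versions (FW_1_65_10 and friends) to decide whether the context buffer may be used. The self-contained sketch below illustrates that comparison; the layout of the low bits and the concrete FW_1_65_10 encoding are assumptions made only for this example, not taken from the driver.

/*
 * Illustration of the version gating around lines 302-368: major/minor
 * live in the upper bytes of fw_version (line 302) and use_ctx_buf is
 * enabled once the firmware is new enough for the ASIC.  The packing of
 * the low bits and the FW_1_65_10 value are assumed for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed layout: major in bits 31:24, minor in 23:16, revision in 15:8 */
static uint32_t pack_fw_version(uint8_t major, uint8_t minor, uint8_t rev)
{
	return ((uint32_t)major << 24) | ((uint32_t)minor << 16) |
	       ((uint32_t)rev << 8);
}

#define FW_1_65_10 pack_fw_version(1, 65, 10)	/* hypothetical encoding */

int main(void)
{
	uint32_t fw_version = pack_fw_version(1, 66, 16);
	bool use_ctx_buf = fw_version >= FW_1_65_10;	/* cf. line 356 */

	printf("fw_version=0x%08x use_ctx_buf=%d\n", fw_version, use_ctx_buf);
	return 0;
}
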
376 void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo); in amdgpu_uvd_sw_fini()
379 drm_sched_entity_destroy(&adev->uvd.entity); in amdgpu_uvd_sw_fini()
381 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_sw_fini()
382 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_fini()
384 kvfree(adev->uvd.inst[j].saved_bo); in amdgpu_uvd_sw_fini()
386 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_fini()
387 &adev->uvd.inst[j].gpu_addr, in amdgpu_uvd_sw_fini()
388 (void **)&adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_fini()
390 amdgpu_ring_fini(&adev->uvd.inst[j].ring); in amdgpu_uvd_sw_fini()
393 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); in amdgpu_uvd_sw_fini()
395 amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr); in amdgpu_uvd_sw_fini()
396 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_fini()
413 ring = &adev->uvd.inst[0].ring; in amdgpu_uvd_entity_init()
415 r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_uvd_entity_init()
432 cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_suspend()
436 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_suspend()
437 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_suspend()
440 if (i == adev->uvd.max_handles) in amdgpu_uvd_suspend()
444 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_suspend()
445 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_suspend()
447 if (adev->uvd.inst[j].vcpu_bo == NULL) in amdgpu_uvd_suspend()
450 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); in amdgpu_uvd_suspend()
451 ptr = adev->uvd.inst[j].cpu_addr; in amdgpu_uvd_suspend()
453 adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_uvd_suspend()
454 if (!adev->uvd.inst[j].saved_bo) in amdgpu_uvd_suspend()
460 memset(adev->uvd.inst[j].saved_bo, 0, size); in amdgpu_uvd_suspend()
462 memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); in amdgpu_uvd_suspend()
480 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in amdgpu_uvd_resume()
481 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_resume()
483 if (adev->uvd.inst[i].vcpu_bo == NULL) in amdgpu_uvd_resume()
486 size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo); in amdgpu_uvd_resume()
487 ptr = adev->uvd.inst[i].cpu_addr; in amdgpu_uvd_resume()
489 if (adev->uvd.inst[i].saved_bo != NULL) { in amdgpu_uvd_resume()
491 memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); in amdgpu_uvd_resume()
494 kvfree(adev->uvd.inst[i].saved_bo); in amdgpu_uvd_resume()
495 adev->uvd.inst[i].saved_bo = NULL; in amdgpu_uvd_resume()
500 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_resume()
504 memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, in amdgpu_uvd_resume()
513 amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring); in amdgpu_uvd_resume()
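
Together with the suspend path above (lines 444-462), these lines implement a plain save/restore of each instance's VCPU buffer: suspend snapshots the mapped BO into a kvmalloc'd saved_bo with memcpy_fromio(), and resume either copies the snapshot back with memcpy_toio() or, when no snapshot exists, re-uploads the firmware image. The stand-alone model below mirrors that flow with ordinary heap memory; the struct and parameter names are invented for the example and do not come from the driver.

/*
 * User-space model of the save/restore in lines 444-513.  Plain
 * malloc()/memcpy() stand in for kvmalloc() and memcpy_fromio()/
 * memcpy_toio(); struct uvd_inst_model is not a driver type.
 */
#include <stdlib.h>
#include <string.h>

struct uvd_inst_model {
	void *cpu_addr;		/* CPU mapping of the VCPU BO */
	size_t size;		/* amdgpu_bo_size() of that BO */
	void *saved_bo;		/* snapshot taken at suspend time */
};

static int uvd_suspend_model(struct uvd_inst_model *inst)
{
	inst->saved_bo = malloc(inst->size);		/* kvmalloc(), line 453 */
	if (!inst->saved_bo)
		return -1;				/* -ENOMEM */
	memcpy(inst->saved_bo, inst->cpu_addr, inst->size);	/* line 462 */
	return 0;
}

static void uvd_resume_model(struct uvd_inst_model *inst,
			     const void *fw_image, size_t fw_size)
{
	if (inst->saved_bo) {				/* line 489 */
		memcpy(inst->cpu_addr, inst->saved_bo, inst->size);
		free(inst->saved_bo);
		inst->saved_bo = NULL;
	} else {
		/* no snapshot: re-upload the firmware image (lines 500-504) */
		memcpy(inst->cpu_addr, fw_image,
		       fw_size < inst->size ? fw_size : inst->size);
	}
}

int main(void)
{
	static char vcpu[256];
	static const char fw[64] = "firmware image";
	struct uvd_inst_model inst = { .cpu_addr = vcpu, .size = sizeof(vcpu) };

	memset(vcpu, 0xab, sizeof(vcpu));
	if (uvd_suspend_model(&inst) == 0)
		uvd_resume_model(&inst, fw, sizeof(fw));
	return 0;
}
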
521 struct amdgpu_ring *ring = &adev->uvd.inst[0].ring; in amdgpu_uvd_free_handles()
524 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_free_handles()
525 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_free_handles()
527 if (handle != 0 && adev->uvd.filp[i] == filp) { in amdgpu_uvd_free_handles()
540 adev->uvd.filp[i] = NULL; in amdgpu_uvd_free_handles()
541 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_free_handles()
590 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass1()
749 if (!adev->uvd.use_ctx_buf) { in amdgpu_uvd_cs_msg_decode()
797 adev->uvd.decode_image_width = width; in amdgpu_uvd_cs_msg_decode()
847 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
848 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
854 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { in amdgpu_uvd_cs_msg()
855 adev->uvd.filp[i] = ctx->parser->filp; in amdgpu_uvd_cs_msg()
871 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
872 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
873 if (adev->uvd.filp[i] != ctx->parser->filp) { in amdgpu_uvd_cs_msg()
886 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_cs_msg()
887 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); in amdgpu_uvd_cs_msg()
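
The handle table referenced here (and cleaned up per file in lines 524-541) is a small lock-free registry: a CREATE message claims a free slot with atomic_cmpxchg(&handles[i], 0, handle) and records the owning filp, later messages check that ownership, and DESTROY swaps the handle back to 0. The C11-atomics model below reproduces that bookkeeping in user space; MAX_HANDLES and the owner cookie are stand-ins for adev->uvd.max_handles and filp, and the ownership check on destroy follows the per-filp cleanup path rather than matching the driver line for line.

/*
 * User-space model of the session-handle bookkeeping in lines 524-541
 * and 847-887, using C11 atomics instead of the kernel's atomic_t.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16			/* stands in for adev->uvd.max_handles */

static atomic_uint handles[MAX_HANDLES];
static const void *owner[MAX_HANDLES];	/* plays the role of uvd.filp[] */

/* CREATE: claim a free slot for @handle, owned by @filp (lines 847-855) */
static int handle_create(uint32_t handle, const void *filp)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		unsigned int expected = 0;

		if (atomic_load(&handles[i]) == handle)
			return -1;	/* handle already in use */
		if (atomic_compare_exchange_strong(&handles[i], &expected,
						   handle)) {
			owner[i] = filp;
			return 0;
		}
	}
	return -1;			/* no free slot left */
}

/* DESTROY: release @handle, with the ownership check of the cleanup path */
static int handle_destroy(uint32_t handle, const void *filp)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		unsigned int expected = handle;

		if (atomic_load(&handles[i]) != handle)
			continue;
		if (owner[i] != filp)
			return -1;	/* owned by another file descriptor */
		atomic_compare_exchange_strong(&handles[i], &expected, 0);
		owner[i] = NULL;
		return 0;
	}
	return -1;			/* unknown handle */
}

int main(void)
{
	int me;				/* dummy owner cookie */

	printf("create: %d\n", handle_create(0x1234, &me));
	printf("destroy: %d\n", handle_destroy(0x1234, &me));
	return 0;
}
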
954 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass2()
962 (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { in amdgpu_uvd_cs_pass2()
1100 if (!parser->adev->uvd.address_64_bit) { in amdgpu_uvd_ring_parse_cs()
1182 r = amdgpu_job_submit(job, &adev->uvd.entity, in amdgpu_uvd_send_msg()
1210 struct amdgpu_bo *bo = adev->uvd.ib_bo; in amdgpu_uvd_get_create_msg()
1243 bo = adev->uvd.ib_bo; in amdgpu_uvd_get_destroy_msg()
1270 container_of(work, struct amdgpu_device, uvd.idle_work.work); in amdgpu_uvd_idle_work_handler()
1273 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in amdgpu_uvd_idle_work_handler()
1274 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_idle_work_handler()
1276 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); in amdgpu_uvd_idle_work_handler()
1277 for (j = 0; j < adev->uvd.num_enc_rings; ++j) { in amdgpu_uvd_idle_work_handler()
1278 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); in amdgpu_uvd_idle_work_handler()
1294 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_idle_work_handler()
1306 set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_ring_begin_use()
1323 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_ring_end_use()
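
Lines 1270-1323 show the runtime power-management pattern: a delayed work item sums the fences still emitted on every UVD ring (decode and encode) and either powers the block down or re-arms itself; amdgpu_uvd_ring_begin_use() cancels the pending work and powers back up when the work was no longer pending, and amdgpu_uvd_ring_end_use() re-arms it. The sketch below models that logic with a plain flag instead of a real timer; power_up()/power_down() are placeholders for the clock- and power-gating calls, which are not part of this listing.

/*
 * Model of the idle-work pattern in lines 1270-1323.  idle_work_pending
 * stands in for the delayed work being armed; power_up()/power_down()
 * are placeholders, not driver functions.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int fences_emitted;	/* sum of amdgpu_fence_count_emitted() */
static bool idle_work_pending;		/* delayed work armed or not */

static void power_down(void) { puts("UVD powered down"); }
static void power_up(void)   { puts("UVD powered up"); }

/* runs UVD_IDLE_TIMEOUT after the last end_use (lines 1270-1294) */
static void idle_work_handler(void)
{
	idle_work_pending = false;
	if (fences_emitted == 0)
		power_down();
	else
		idle_work_pending = true;	/* schedule_delayed_work() again */
}

/* before submitting to a UVD ring (line 1306) */
static void ring_begin_use(void)
{
	/* cancel_delayed_work_sync() reports whether the work was pending */
	bool was_pending = idle_work_pending;

	idle_work_pending = false;
	if (!was_pending)			/* the "set_clocks" case */
		power_up();
}

/* once the submission has been queued (line 1323) */
static void ring_end_use(void)
{
	idle_work_pending = true;		/* re-arm with UVD_IDLE_TIMEOUT */
}

int main(void)
{
	ring_begin_use();			/* nothing pending: powers up */
	fences_emitted = 1;			/* a job is now in flight */
	ring_end_use();
	idle_work_handler();			/* still busy: re-arms itself */
	fences_emitted = 0;			/* the job has retired */
	idle_work_handler();			/* idle: powers the block down */
	return 0;
}
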
1378 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_used_handles()
1384 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_used_handles()