Lines Matching refs:uvd

89 if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) in uvd_v7_0_enc_ring_get_rptr()
123 if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) in uvd_v7_0_enc_ring_get_wptr()
161 if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) in uvd_v7_0_enc_ring_set_wptr()
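
These three hits share one dispatch: each encode-ring callback checks whether the ring is the first encode ring of its instance (inst[ring->me].ring_enc[0]) and picks the matching hardware register. A minimal standalone sketch of that pattern; the types and read_enc_rptr() are hypothetical stand-ins for the amdgpu structures and RREG32_SOC15().

#include <stdint.h>

/* Stand-ins for the amdgpu structures named in the hits above. */
struct ring { int me; };
struct uvd_inst { struct ring ring_enc[2]; };
struct adev { struct uvd_inst inst[2]; };

/* Hypothetical register read standing in for RREG32_SOC15(). */
static uint32_t read_enc_rptr(int inst, int which)
{
	return (uint32_t)(inst * 2 + which);
}

/* Pick the register by ring identity, as the get_rptr/get_wptr hits do. */
static uint32_t enc_ring_get_rptr(struct adev *adev, struct ring *ring)
{
	if (ring == &adev->inst[ring->me].ring_enc[0])
		return read_enc_rptr(ring->me, 0);	/* first encode ring */
	return read_enc_rptr(ring->me, 1);	/* second encode ring */
}
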
342 struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; in uvd_v7_0_enc_ring_test_ib()
372 adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20; in uvd_v7_0_early_init()
373 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in uvd_v7_0_early_init()
376 adev->uvd.harvest_config |= 1 << i; in uvd_v7_0_early_init()
379 if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 | in uvd_v7_0_early_init()
384 adev->uvd.num_uvd_inst = 1; in uvd_v7_0_early_init()
388 adev->uvd.num_enc_rings = 1; in uvd_v7_0_early_init()
390 adev->uvd.num_enc_rings = 2; in uvd_v7_0_early_init()
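
The early_init hits build a harvest mask with one bit per fused-off instance, then special-case the fully-harvested part; the num_uvd_inst = 1 fallback and the one-vs-two encode-ring choice sit in per-chip branches not visible in this listing. A compilable sketch of the mask bookkeeping, assuming a two-instance Vega20-style part; is_harvested() is a hypothetical stand-in for the fuse read.

#include <stdio.h>

#define MAX_INSTANCES	2
#define HARVEST_UVD0	(1 << 0)
#define HARVEST_UVD1	(1 << 1)

/* Hypothetical probe standing in for the harvest fuse check. */
static int is_harvested(int i)
{
	return i == 1;	/* pretend instance 1 is fused off */
}

static int early_init_sketch(int *num_inst, unsigned int *harvest)
{
	int i;

	*num_inst = MAX_INSTANCES;
	*harvest = 0;
	for (i = 0; i < MAX_INSTANCES; i++)
		if (is_harvested(i))
			*harvest |= 1 << i;

	/* Both instances fused off: nothing left to expose. */
	if (*harvest == (HARVEST_UVD0 | HARVEST_UVD1))
		return -1;
	return 0;
}

int main(void)
{
	int n;
	unsigned int h;

	if (early_init_sketch(&n, &h) == 0)
		printf("instances=%d harvest=0x%x\n", n, h);
	return 0;
}
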
405 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in uvd_v7_0_sw_init()
406 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_sw_init()
409 … amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq); in uvd_v7_0_sw_init()
414 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { in uvd_v7_0_sw_init()
415 …id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq); in uvd_v7_0_sw_init()
427 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in uvd_v7_0_sw_init()
429 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw; in uvd_v7_0_sw_init()
433 if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) { in uvd_v7_0_sw_init()
435 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw; in uvd_v7_0_sw_init()
442 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in uvd_v7_0_sw_init()
443 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_sw_init()
446 ring = &adev->uvd.inst[j].ring; in uvd_v7_0_sw_init()
449 &adev->uvd.inst[j].irq, 0, in uvd_v7_0_sw_init()
455 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { in uvd_v7_0_sw_init()
456 ring = &adev->uvd.inst[j].ring_enc[i]; in uvd_v7_0_sw_init()
470 &adev->uvd.inst[j].irq, 0, in uvd_v7_0_sw_init()
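
The sw_init hits all follow one shape: for each surviving instance, hook the system-message interrupt plus one encode interrupt per encode ring, then initialize the decode ring and every encode ring. A sketch of that loop structure; hook_irq() and the SRCID values are hypothetical stand-ins for amdgpu_irq_add_id() and the soc15 source IDs.

#define SRCID_SYSTEM_MESSAGE	1	/* invented value */
#define SRCID_ENC_BASE		2	/* invented value */

struct adev_sketch {
	int num_inst;
	int num_enc_rings;
	unsigned int harvest_config;
};

/* Hypothetical stand-in for amdgpu_irq_add_id(). */
static int hook_irq(struct adev_sketch *adev, int inst, int srcid)
{
	(void)adev; (void)inst; (void)srcid;
	return 0;
}

static int sw_init_sketch(struct adev_sketch *adev)
{
	int i, j, r;

	for (j = 0; j < adev->num_inst; j++) {
		if (adev->harvest_config & (1 << j))
			continue;	/* skip harvested instances */

		r = hook_irq(adev, j, SRCID_SYSTEM_MESSAGE);
		if (r)
			return r;

		for (i = 0; i < adev->num_enc_rings; ++i) {
			r = hook_irq(adev, j, SRCID_ENC_BASE + i);
			if (r)
				return r;
		}
		/* ...followed by ring init for inst[j].ring and each
		 * inst[j].ring_enc[i], as lines 446-470 show. */
	}
	return 0;
}
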
503 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in uvd_v7_0_sw_fini()
504 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_sw_fini()
506 for (i = 0; i < adev->uvd.num_enc_rings; ++i) in uvd_v7_0_sw_fini()
507 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); in uvd_v7_0_sw_fini()
533 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in uvd_v7_0_hw_init()
534 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_hw_init()
536 ring = &adev->uvd.inst[j].ring; in uvd_v7_0_hw_init()
576 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { in uvd_v7_0_hw_init()
577 ring = &adev->uvd.inst[j].ring_enc[i]; in uvd_v7_0_hw_init()
601 cancel_delayed_work_sync(&adev->uvd.idle_work); in uvd_v7_0_hw_fini()
629 cancel_delayed_work_sync(&adev->uvd.idle_work); in uvd_v7_0_suspend()
674 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in uvd_v7_0_mc_resume()
675 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_mc_resume()
690 lower_32_bits(adev->uvd.inst[i].gpu_addr)); in uvd_v7_0_mc_resume()
692 upper_32_bits(adev->uvd.inst[i].gpu_addr)); in uvd_v7_0_mc_resume()
701 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); in uvd_v7_0_mc_resume()
703 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); in uvd_v7_0_mc_resume()
708 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); in uvd_v7_0_mc_resume()
710 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); in uvd_v7_0_mc_resume()
722 WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles); in uvd_v7_0_mc_resume()
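
mc_resume programs each firmware region base as a low/high register pair: the VCPU cache at gpu_addr, a second region at +offset, and a third at +offset + AMDGPU_UVD_HEAP_SIZE. The repeated lower_32_bits()/upper_32_bits() splits are the whole mechanism. A standalone illustration with made-up addresses and register names; wreg() stands in for WREG32_SOC15().

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffULL))
#define upper_32_bits(n)	((uint32_t)((n) >> 32))

/* Hypothetical register write standing in for WREG32_SOC15(). */
static void wreg(const char *name, uint32_t v)
{
	printf("%-24s <- 0x%08x\n", name, (unsigned int)v);
}

int main(void)
{
	uint64_t gpu_addr = 0x0000008000400000ULL;	/* made-up address */
	uint64_t offset = 0x1000, heap = 0x4000;	/* made-up sizes */

	wreg("CACHE_BAR_LOW", lower_32_bits(gpu_addr));
	wreg("CACHE_BAR_HIGH", upper_32_bits(gpu_addr));
	wreg("REGION1_BAR_LOW", lower_32_bits(gpu_addr + offset));
	wreg("REGION1_BAR_HIGH", upper_32_bits(gpu_addr + offset));
	wreg("REGION2_BAR_LOW", lower_32_bits(gpu_addr + offset + heap));
	wreg("REGION2_BAR_HIGH", upper_32_bits(gpu_addr + offset + heap));
	return 0;
}
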
753 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in uvd_v7_0_mmsch_start()
754 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_mmsch_start()
756 WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0); in uvd_v7_0_mmsch_start()
757 *adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0; in uvd_v7_0_mmsch_start()
758 adev->uvd.inst[i].ring_enc[0].wptr = 0; in uvd_v7_0_mmsch_start()
759 adev->uvd.inst[i].ring_enc[0].wptr_old = 0; in uvd_v7_0_mmsch_start()
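
The mmsch_start hits reset each surviving instance's first encode ring to a clean state: ring the doorbell with zero, zero the CPU-visible wptr shadow, and clear the cached wptr/wptr_old. A stand-in sketch; the struct and ring_doorbell_write() are hypothetical, WDOORBELL32 is the macro the real code uses.

#include <stdint.h>

struct enc_ring_sketch {
	uint32_t doorbell_index;
	volatile uint32_t *wptr_cpu_addr;	/* CPU view of the wptr shadow */
	uint64_t wptr, wptr_old;
};

/* Hypothetical stand-in for WDOORBELL32(). */
static void ring_doorbell_write(uint32_t index, uint32_t val)
{
	(void)index; (void)val;
}

static void reset_enc_ring(struct enc_ring_sketch *ring)
{
	ring_doorbell_write(ring->doorbell_index, 0);
	*ring->wptr_cpu_addr = 0;
	ring->wptr = 0;
	ring->wptr_old = 0;
}
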
811 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in uvd_v7_0_sriov_start()
812 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_sriov_start()
814 ring = &adev->uvd.inst[i].ring; in uvd_v7_0_sriov_start()
816 size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); in uvd_v7_0_sriov_start()
832 lower_32_bits(adev->uvd.inst[i].gpu_addr)); in uvd_v7_0_sriov_start()
834 upper_32_bits(adev->uvd.inst[i].gpu_addr)); in uvd_v7_0_sriov_start()
844 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); in uvd_v7_0_sriov_start()
846 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); in uvd_v7_0_sriov_start()
851 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); in uvd_v7_0_sriov_start()
853 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); in uvd_v7_0_sriov_start()
858 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); in uvd_v7_0_sriov_start()
917 ring = &adev->uvd.inst[i].ring_enc[0]; in uvd_v7_0_sriov_start()
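
Under SR-IOV the guest driver cannot poke the engine registers directly; sriov_start records the same writes into an init table (the MMSCH_V1_0_INSERT_DIRECT_WT hits) that the host's MMSCH firmware later replays. A toy table builder showing the idea only; the entry layout is invented, and the real table entries also carry command headers.

#include <stdint.h>

struct wt_entry {	/* invented layout, for illustration only */
	uint32_t reg_offset;
	uint32_t value;
};

static struct wt_entry init_table[64];
static unsigned int init_count;

/* Record a register write instead of issuing it; the host applies the
 * accumulated table on the guest's behalf. */
static int insert_direct_wt(uint32_t reg, uint32_t value)
{
	if (init_count >= 64)
		return -1;
	init_table[init_count].reg_offset = reg;
	init_table[init_count].value = value;
	init_count++;
	return 0;
}
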
956 for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { in uvd_v7_0_start()
957 if (adev->uvd.harvest_config & (1 << k)) in uvd_v7_0_start()
970 for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { in uvd_v7_0_start()
971 if (adev->uvd.harvest_config & (1 << k)) in uvd_v7_0_start()
973 ring = &adev->uvd.inst[k].ring; in uvd_v7_0_start()
1111 ring = &adev->uvd.inst[k].ring_enc[0]; in uvd_v7_0_start()
1118 ring = &adev->uvd.inst[k].ring_enc[1]; in uvd_v7_0_start()
1139 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in uvd_v7_0_stop()
1140 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_stop()
1496 adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
1499 adev->uvd.inst[ring->me].srbm_soft_reset = 0;
1508 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1520 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1522 srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
1550 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1590 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring); in uvd_v7_0_process_interrupt()
1593 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]); in uvd_v7_0_process_interrupt()
1597 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]); in uvd_v7_0_process_interrupt()
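
process_interrupt fans a single IRQ entry out by source ID: the system-message source completes fences on the decode ring, the two encode sources on encode rings 0 and 1 of the signalled instance. A sketch with invented IDs; fence_process() stands in for amdgpu_fence_process().

enum srcid {	/* invented values; the real SRCIDs live in soc15 headers */
	SRC_SYSTEM_MESSAGE = 1,
	SRC_ENC_GEN_PURP = 2,
	SRC_ENC_LOW_LATENCY = 3,
};

/* Stand-in for amdgpu_fence_process(). */
static void fence_process(const char *ring)
{
	(void)ring;
}

static void process_interrupt_sketch(int ip_instance, enum srcid src)
{
	(void)ip_instance;	/* selects uvd.inst[ip_instance] above */
	switch (src) {
	case SRC_SYSTEM_MESSAGE:
		fence_process("dec ring");	/* inst[i].ring */
		break;
	case SRC_ENC_GEN_PURP:
		fence_process("enc ring 0");	/* inst[i].ring_enc[0] */
		break;
	case SRC_ENC_LOW_LATENCY:
		fence_process("enc ring 1");	/* inst[i].ring_enc[1] */
		break;
	}
}
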
1869 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in uvd_v7_0_set_ring_funcs()
1870 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_set_ring_funcs()
1872 adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs; in uvd_v7_0_set_ring_funcs()
1873 adev->uvd.inst[i].ring.me = i; in uvd_v7_0_set_ring_funcs()
1882 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in uvd_v7_0_set_enc_ring_funcs()
1883 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_set_enc_ring_funcs()
1885 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { in uvd_v7_0_set_enc_ring_funcs()
1886 adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; in uvd_v7_0_set_enc_ring_funcs()
1887 adev->uvd.inst[j].ring_enc[i].me = j; in uvd_v7_0_set_enc_ring_funcs()
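
set_ring_funcs and set_enc_ring_funcs wire one shared vtable into every surviving ring and stamp each ring with its instance index; that .me field is what the rptr/wptr callbacks at the top of this listing dereference. A sketch with stand-in types:

struct ring_funcs { int dummy; };	/* function-pointer table in the driver */
struct ring_sketch { const struct ring_funcs *funcs; int me; };

static const struct ring_funcs vm_funcs = { 0 };	/* one shared table */

static void set_ring_funcs_sketch(struct ring_sketch *rings, int num_inst,
				  unsigned int harvest)
{
	int i;

	for (i = 0; i < num_inst; i++) {
		if (harvest & (1 << i))
			continue;
		rings[i].funcs = &vm_funcs;	/* shared across instances */
		rings[i].me = i;	/* remember which instance owns it */
	}
}
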
1903 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in uvd_v7_0_set_irq_funcs()
1904 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_set_irq_funcs()
1906 adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1; in uvd_v7_0_set_irq_funcs()
1907 adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs; in uvd_v7_0_set_irq_funcs()
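
Finally, set_irq_funcs sizes each instance's IRQ source as num_enc_rings + 1: one interrupt type per encode ring plus one for the decode/system-message path. On a two-encode-ring Vega20 instance that is 2 + 1 = 3 types, matching the three amdgpu_fence_process() branches in the process_interrupt hits above.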