Lines Matching refs:vcn

93 adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; in vcn_v3_0_early_init()
94 adev->vcn.harvest_config = 0; in vcn_v3_0_early_init()
95 adev->vcn.num_enc_rings = 1; in vcn_v3_0_early_init()
98 if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | in vcn_v3_0_early_init()
104 adev->vcn.num_enc_rings = 0; in vcn_v3_0_early_init()
106 adev->vcn.num_enc_rings = 2; in vcn_v3_0_early_init()
148 vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1; in vcn_v3_0_sw_init()
153 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_sw_init()
156 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_sw_init()
159 adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
160 adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
161 adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
162 adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
163 adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
164 adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
166 adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
167 adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9); in vcn_v3_0_sw_init()
168 adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
169 adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0); in vcn_v3_0_sw_init()
170 adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
171 adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1); in vcn_v3_0_sw_init()
172 adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
173 adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD); in vcn_v3_0_sw_init()
174 adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
175 adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP); in vcn_v3_0_sw_init()
179 VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq); in vcn_v3_0_sw_init()
183 atomic_set(&adev->vcn.inst[i].sched_score, 0); in vcn_v3_0_sw_init()
185 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_sw_init()
188 ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1); in vcn_v3_0_sw_init()
190 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i; in vcn_v3_0_sw_init()
193 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, in vcn_v3_0_sw_init()
195 &adev->vcn.inst[i].sched_score); in vcn_v3_0_sw_init()
199 for (j = 0; j < adev->vcn.num_enc_rings; ++j) { in vcn_v3_0_sw_init()
204 j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq); in vcn_v3_0_sw_init()
208 ring = &adev->vcn.inst[i].ring_enc[j]; in vcn_v3_0_sw_init()
211 ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j; in vcn_v3_0_sw_init()
213 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i; in vcn_v3_0_sw_init()
216 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, in vcn_v3_0_sw_init()
217 hw_prio, &adev->vcn.inst[i].sched_score); in vcn_v3_0_sw_init()
222 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v3_0_sw_init()
234 amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); in vcn_v3_0_sw_init()
243 adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode; in vcn_v3_0_sw_init()
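Note on the vcn_v3_0_sw_init() matches above: the non-SR-IOV path assigns dec ring doorbells as vcn_doorbell_index + i * (num_enc_rings + 1) and enc ring j as that value + 1 + j, while the SR-IOV path uses (vcn_ring0_1 << 1) + 8 * i and + 2 + j + 8 * i. A minimal standalone sketch of the non-SR-IOV arithmetic only; the constants below are placeholders, not real doorbell assignments from the driver:

#include <stdio.h>

int main(void)
{
	/* Placeholder values; in the real driver these come from
	 * adev->doorbell_index.vcn.vcn_ring0_1 and the per-ASIC counts. */
	const int vcn_doorbell_index = 0x20;
	const int num_vcn_inst = 2;	/* e.g. two instances, as on Sienna Cichlid */
	const int num_enc_rings = 2;

	for (int i = 0; i < num_vcn_inst; i++) {
		/* dec ring: vcn_doorbell_index + i * (num_enc_rings + 1) */
		printf("inst %d dec doorbell %d\n",
		       i, vcn_doorbell_index + i * (num_enc_rings + 1));
		for (int j = 0; j < num_enc_rings; j++)
			/* enc ring j: one slot past the dec ring, then consecutive */
			printf("inst %d enc[%d] doorbell %d\n", i, j,
			       vcn_doorbell_index + i * (num_enc_rings + 1) + 1 + j);
	}
	return 0;
}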
261 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_sw_fini()
264 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_sw_fini()
266 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v3_0_sw_fini()
305 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_hw_init()
306 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_hw_init()
309 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_hw_init()
321 for (j = 0; j < adev->vcn.num_enc_rings; ++j) { in vcn_v3_0_hw_init()
322 ring = &adev->vcn.inst[i].ring_enc[j]; in vcn_v3_0_hw_init()
336 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_hw_init()
337 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_hw_init()
340 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_hw_init()
349 for (j = 0; j < adev->vcn.num_enc_rings; ++j) { in vcn_v3_0_hw_init()
350 ring = &adev->vcn.inst[i].ring_enc[j]; in vcn_v3_0_hw_init()
378 cancel_delayed_work_sync(&adev->vcn.idle_work); in vcn_v3_0_hw_fini()
380 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_hw_fini()
381 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_hw_fini()
386 (adev->vcn.cur_state != AMD_PG_STATE_GATE && in vcn_v3_0_hw_fini()
448 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); in vcn_v3_0_mc_resume()
461 lower_32_bits(adev->vcn.inst[inst].gpu_addr)); in vcn_v3_0_mc_resume()
463 upper_32_bits(adev->vcn.inst[inst].gpu_addr)); in vcn_v3_0_mc_resume()
472 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); in vcn_v3_0_mc_resume()
474 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); in vcn_v3_0_mc_resume()
480 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); in vcn_v3_0_mc_resume()
482 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); in vcn_v3_0_mc_resume()
488 lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); in vcn_v3_0_mc_resume()
490 upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); in vcn_v3_0_mc_resume()
498 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); in vcn_v3_0_mc_resume_dpg_mode()
524 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
527 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
545 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
548 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
565 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
568 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
577 lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
580 upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
942 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; in vcn_v3_0_start_dpg_mode()
956 adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; in vcn_v3_0_start_dpg_mode()
1038 psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, in vcn_v3_0_start_dpg_mode()
1039 (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - in vcn_v3_0_start_dpg_mode()
1040 (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); in vcn_v3_0_start_dpg_mode()
1042 ring = &adev->vcn.inst[inst_idx].ring_dec; in vcn_v3_0_start_dpg_mode()
1104 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_start()
1105 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_start()
1109 r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram); in vcn_v3_0_start()
1229 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_start()
1239 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v3_0_start()
1260 ring = &adev->vcn.inst[i].ring_enc[0]; in vcn_v3_0_start()
1269 ring = &adev->vcn.inst[i].ring_enc[1]; in vcn_v3_0_start()
1322 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_start_sriov()
1323 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_start_sriov()
1332 cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); in vcn_v3_0_start_sriov()
1348 lower_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v3_0_start_sriov()
1351 upper_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v3_0_start_sriov()
1362 cache_addr = adev->vcn.inst[i].gpu_addr + offset; in vcn_v3_0_start_sriov()
1376 cache_addr = adev->vcn.inst[i].gpu_addr + offset + in vcn_v3_0_start_sriov()
1391 for (j = 0; j < adev->vcn.num_enc_rings; ++j) { in vcn_v3_0_start_sriov()
1392 ring = &adev->vcn.inst[i].ring_enc[j]; in vcn_v3_0_start_sriov()
1406 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_start_sriov()
1527 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_stop()
1528 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_stop()
1606 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { in vcn_v3_0_pause_dpg_mode()
1608 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); in vcn_v3_0_pause_dpg_mode()
1633 fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; in vcn_v3_0_pause_dpg_mode()
1635 ring = &adev->vcn.inst[inst_idx].ring_enc[0]; in vcn_v3_0_pause_dpg_mode()
1645 ring = &adev->vcn.inst[inst_idx].ring_enc[1]; in vcn_v3_0_pause_dpg_mode()
1671 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; in vcn_v3_0_pause_dpg_mode()
1722 fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr; in vcn_v3_0_dec_ring_set_wptr()
1775 if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) in vcn_v3_0_limit_sched()
1883 if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) { in vcn_v3_0_ring_patch_cs_in_place()
1885 } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) { in vcn_v3_0_ring_patch_cs_in_place()
1887 } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) && in vcn_v3_0_ring_patch_cs_in_place()
1941 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) in vcn_v3_0_enc_ring_get_rptr()
1958 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { in vcn_v3_0_enc_ring_get_wptr()
1982 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { in vcn_v3_0_enc_ring_set_wptr()
2033 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_set_dec_ring_funcs()
2034 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_set_dec_ring_funcs()
2038 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs; in vcn_v3_0_set_dec_ring_funcs()
2040 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs; in vcn_v3_0_set_dec_ring_funcs()
2041 adev->vcn.inst[i].ring_dec.me = i; in vcn_v3_0_set_dec_ring_funcs()
2051 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_set_enc_ring_funcs()
2052 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_set_enc_ring_funcs()
2055 for (j = 0; j < adev->vcn.num_enc_rings; ++j) { in vcn_v3_0_set_enc_ring_funcs()
2056 adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs; in vcn_v3_0_set_enc_ring_funcs()
2057 adev->vcn.inst[i].ring_enc[j].me = i; in vcn_v3_0_set_enc_ring_funcs()
2059 if (adev->vcn.num_enc_rings > 0) in vcn_v3_0_set_enc_ring_funcs()
2069 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_is_idle()
2070 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_is_idle()
2084 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_wait_for_idle()
2085 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_wait_for_idle()
2104 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_set_clockgating_state()
2105 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_set_clockgating_state()
2131 adev->vcn.cur_state = AMD_PG_STATE_UNGATE; in vcn_v3_0_set_powergating_state()
2135 if(state == adev->vcn.cur_state) in vcn_v3_0_set_powergating_state()
2144 adev->vcn.cur_state = state; in vcn_v3_0_set_powergating_state()
2179 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec); in vcn_v3_0_process_interrupt()
2182 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); in vcn_v3_0_process_interrupt()
2185 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); in vcn_v3_0_process_interrupt()
2205 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_set_irq_funcs()
2206 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_set_irq_funcs()
2209 adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; in vcn_v3_0_set_irq_funcs()
2210 adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs; in vcn_v3_0_set_irq_funcs()
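Nearly every function in the listing iterates over adev->vcn.num_vcn_inst and skips any instance whose bit is set in adev->vcn.harvest_config (see the matches in vcn_v3_0_sw_init(), vcn_v3_0_hw_init(), vcn_v3_0_stop(), vcn_v3_0_set_irq_funcs(), and others above). A minimal standalone model of that skip pattern; the struct and macros are simplified stand-ins for the real amdgpu_device fields, not driver definitions:

#include <stdio.h>

#define VCN_HARVEST_VCN0 (1 << 0)	/* mirrors AMDGPU_VCN_HARVEST_VCN0 */
#define VCN_HARVEST_VCN1 (1 << 1)	/* mirrors AMDGPU_VCN_HARVEST_VCN1 */

struct vcn_state {			/* simplified stand-in for adev->vcn */
	int num_vcn_inst;
	unsigned int harvest_config;
};

int main(void)
{
	/* Example: two instances, with instance 1 fused off (harvested). */
	struct vcn_state vcn = {
		.num_vcn_inst = 2,
		.harvest_config = VCN_HARVEST_VCN1,
	};

	for (int i = 0; i < vcn.num_vcn_inst; ++i) {
		if (vcn.harvest_config & (1 << i)) {
			printf("instance %d harvested, skipped\n", i);
			continue;
		}
		printf("instance %d handled\n", i);
	}
	return 0;
}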