Lines Matching refs:vcn

86 adev->vcn.num_vcn_inst = 2; in vcn_v2_5_early_init()
87 adev->vcn.harvest_config = 0; in vcn_v2_5_early_init()
88 adev->vcn.num_enc_rings = 1; in vcn_v2_5_early_init()
93 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v2_5_early_init()
96 adev->vcn.harvest_config |= 1 << i; in vcn_v2_5_early_init()
98 if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | in vcn_v2_5_early_init()
103 adev->vcn.num_enc_rings = 2; in vcn_v2_5_early_init()
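
Taken together, the vcn_v2_5_early_init() matches above describe a harvesting pass: assume two VCN instances, fold each fused-off instance into a per-instance bitmask, and give up if every instance is harvested. Below is a minimal sketch of that pattern, assuming the amdgpu driver context; the vcn_instance_is_harvested() probe and the AMDGPU_VCN_HARVEST_VCN1 flag are illustrative assumptions (only AMDGPU_VCN_HARVEST_VCN0 appears in the matches), and the num_enc_rings = 2 assignment at line 103 lives on another branch of the function that the sketch leaves out.

static int vcn_v2_5_early_init_sketch(struct amdgpu_device *adev)
{
	int i;

	adev->vcn.num_vcn_inst = 2;		/* VCN 2.5 exposes two instances */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_enc_rings = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		/* hypothetical probe: is this instance fused off? */
		if (vcn_instance_is_harvested(adev, i))
			adev->vcn.harvest_config |= 1 << i;
	}

	/* nothing to bring up if both instances are harvested */
	if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
					 AMDGPU_VCN_HARVEST_VCN1))
		return -ENOENT;

	return 0;
}
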
127 for (j = 0; j < adev->vcn.num_vcn_inst; j++) { in vcn_v2_5_sw_init()
128 if (adev->vcn.harvest_config & (1 << j)) in vcn_v2_5_sw_init()
132 VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq); in vcn_v2_5_sw_init()
137 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { in vcn_v2_5_sw_init()
139 i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq); in vcn_v2_5_sw_init()
146 VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq); in vcn_v2_5_sw_init()
161 for (j = 0; j < adev->vcn.num_vcn_inst; j++) { in vcn_v2_5_sw_init()
164 if (adev->vcn.harvest_config & (1 << j)) in vcn_v2_5_sw_init()
166 adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
167 adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
168 adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
169 adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
170 adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
171 adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
173 adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
174 adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9); in vcn_v2_5_sw_init()
175 adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
176 adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0); in vcn_v2_5_sw_init()
177 adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
178 adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1); in vcn_v2_5_sw_init()
179 adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
180 adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD); in vcn_v2_5_sw_init()
181 adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; in vcn_v2_5_sw_init()
182 adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP); in vcn_v2_5_sw_init()
184 ring = &adev->vcn.inst[j].ring_dec; in vcn_v2_5_sw_init()
187 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + in vcn_v2_5_sw_init()
190 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, in vcn_v2_5_sw_init()
195 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { in vcn_v2_5_sw_init()
198 ring = &adev->vcn.inst[j].ring_enc[i]; in vcn_v2_5_sw_init()
201 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + in vcn_v2_5_sw_init()
206 &adev->vcn.inst[j].irq, 0, in vcn_v2_5_sw_init()
212 fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr; in vcn_v2_5_sw_init()
216 amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); in vcn_v2_5_sw_init()
226 adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode; in vcn_v2_5_sw_init()
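
The vcn_v2_5_sw_init() matches sketch a per-instance setup loop: skip harvested instances, hook up the system-message and encode interrupts, record the shared internal register offsets (adev->vcn.internal.*) alongside the per-instance external offsets via SOC15_REG_OFFSET(VCN, j, ...), then create one decode ring plus num_enc_rings encode rings whose doorbells derive from doorbell_index.vcn.vcn_ring0_1. A condensed sketch of that shape follows; the doorbell offsets (8 * j for decode, 8 * j + 2 + i for encode), the trailing amdgpu_ring_init() arguments, and AMDGPU_RING_PRIO_DEFAULT are truncated or absent in the listing and are assumptions here.

static int vcn_v2_5_sw_init_ring_sketch(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		/* internal offsets are common; external ones are per instance */
		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 =
			SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);

		/* one decode ring per instance */
		ring = &adev->vcn.inst[j].ring_dec;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				       8 * j;			/* assumed offset */
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);	/* assumed tail args */
		if (r)
			return r;

		/* num_enc_rings encode rings per instance */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->doorbell_index =
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				8 * j + 2 + i;			/* assumed offset */
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	return 0;
}
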
245 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v2_5_sw_fini()
246 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_sw_fini()
248 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v2_5_sw_fini()
283 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in vcn_v2_5_hw_init()
284 if (adev->vcn.harvest_config & (1 << j)) in vcn_v2_5_hw_init()
288 adev->vcn.inst[j].ring_enc[0].sched.ready = true; in vcn_v2_5_hw_init()
289 adev->vcn.inst[j].ring_enc[1].sched.ready = false; in vcn_v2_5_hw_init()
290 adev->vcn.inst[j].ring_enc[2].sched.ready = false; in vcn_v2_5_hw_init()
291 adev->vcn.inst[j].ring_dec.sched.ready = true; in vcn_v2_5_hw_init()
294 ring = &adev->vcn.inst[j].ring_dec; in vcn_v2_5_hw_init()
303 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { in vcn_v2_5_hw_init()
304 ring = &adev->vcn.inst[j].ring_enc[i]; in vcn_v2_5_hw_init()
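
The vcn_v2_5_hw_init() matches first flag which schedulers are usable (the decode ring and enc[0] ready, enc[1]/enc[2] not) and then walk every un-harvested instance to test its decode ring and each encode ring. A minimal sketch of the test loop, assuming amdgpu_ring_test_helper() is the test call on the lines the search did not match:

static int vcn_v2_5_hw_init_test_sketch(struct amdgpu_device *adev)
{
	int i, j, r;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		/* decode ring first, then every encode ring */
		r = amdgpu_ring_test_helper(&adev->vcn.inst[j].ring_dec);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_ring_test_helper(&adev->vcn.inst[j].ring_enc[i]);
			if (r)
				return r;
		}
	}

	return 0;
}
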
332 cancel_delayed_work_sync(&adev->vcn.idle_work); in vcn_v2_5_hw_fini()
334 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_hw_fini()
335 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_hw_fini()
339 (adev->vcn.cur_state != AMD_PG_STATE_GATE && in vcn_v2_5_hw_fini()
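
vcn_v2_5_hw_fini() cancels the shared idle worker before touching the hardware, then gates each un-harvested instance; the truncated condition at line 339 suggests the gate is only forced when the block is not already in AMD_PG_STATE_GATE. A sketch under those assumptions, with the actual gating call stood in by a hypothetical vcn_gate_instance() helper:

static int vcn_v2_5_hw_fini_sketch(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* assumed completion of the condition truncated at line 339 */
		if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
			vcn_gate_instance(adev, i);	/* hypothetical gating helper */
	}

	return 0;
}
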
398 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); in vcn_v2_5_mc_resume()
402 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_mc_resume()
403 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_mc_resume()
415 lower_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v2_5_mc_resume()
417 upper_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v2_5_mc_resume()
426 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset)); in vcn_v2_5_mc_resume()
428 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset)); in vcn_v2_5_mc_resume()
434 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); in vcn_v2_5_mc_resume()
436 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); in vcn_v2_5_mc_resume()
442 lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr)); in vcn_v2_5_mc_resume()
444 upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr)); in vcn_v2_5_mc_resume()
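
vcn_v2_5_mc_resume() programs each instance's memory windows: the firmware size is page-aligned from adev->vcn.fw->size, and every region base (firmware image, stack, context, fw_shared block) is split into lower/upper 32-bit halves before being written to the LMI BAR registers. The register-write lines themselves do not contain "vcn" and so are missing from the matches; the sketch below fills them in with assumed WREG32_SOC15() register names and assumes the non-PSP firmware-load path (offset == size):

static void vcn_v2_5_mc_resume_sketch(struct amdgpu_device *adev, int i)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset = size;
	uint64_t addr = adev->vcn.inst[i].gpu_addr;

	/* firmware image window */
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, lower_32_bits(addr));
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(addr));

	/* stack follows the firmware image */
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(addr + offset));
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(addr + offset));

	/* context follows the stack */
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(addr + offset + AMDGPU_VCN_STACK_SIZE));

	/* per-instance shared block used to exchange data with the firmware */
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
}
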
453 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); in vcn_v2_5_mc_resume_dpg_mode()
479 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); in vcn_v2_5_mc_resume_dpg_mode()
482 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); in vcn_v2_5_mc_resume_dpg_mode()
500 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); in vcn_v2_5_mc_resume_dpg_mode()
503 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); in vcn_v2_5_mc_resume_dpg_mode()
520 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); in vcn_v2_5_mc_resume_dpg_mode()
523 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); in vcn_v2_5_mc_resume_dpg_mode()
532 lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); in vcn_v2_5_mc_resume_dpg_mode()
535 upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); in vcn_v2_5_mc_resume_dpg_mode()
559 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_disable_clock_gating()
560 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_disable_clock_gating()
724 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_enable_clock_gating()
725 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_enable_clock_gating()
803 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; in vcn_v2_5_start_dpg_mode()
817 adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; in vcn_v2_5_start_dpg_mode()
897 psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, in vcn_v2_5_start_dpg_mode()
898 (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - in vcn_v2_5_start_dpg_mode()
899 (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr)); in vcn_v2_5_start_dpg_mode()
901 ring = &adev->vcn.inst[inst_idx].ring_dec; in vcn_v2_5_start_dpg_mode()
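
In DPG mode the register programming for an instance is staged into a scratch SRAM buffer: line 817 resets the write cursor (dpg_sram_curr_addr) to the start of the CPU mapping (dpg_sram_cpu_addr), the indirect writes advance it, and lines 897-899 hand the blob to the PSP with its byte length computed as the pointer difference. A small sketch of that final step, using only fields visible in the matches:

static void vcn_dpg_sram_flush_sketch(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t bytes;

	/* length = how far the write cursor advanced from the CPU-visible start */
	bytes = (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
			   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr);

	psp_update_vcn_sram(adev, inst_idx,
			    adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, bytes);
}
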
956 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_start()
957 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_start()
960 r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); in vcn_v2_5_start()
979 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_start()
980 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_start()
1028 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_start()
1029 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v2_5_start()
1030 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_start()
1093 ring = &adev->vcn.inst[i].ring_dec; in vcn_v2_5_start()
1119 ring = &adev->vcn.inst[i].ring_enc[0]; in vcn_v2_5_start()
1128 ring = &adev->vcn.inst[i].ring_enc[1]; in vcn_v2_5_start()
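
vcn_v2_5_start() walks the instances and takes the DPG path when indirect-SRAM programming is in use, otherwise it programs the instance directly and brings up the decode ring and both encode rings. A sketch of just that dispatch; the AMD_PG_SUPPORT_VCN_DPG check is an assumption for the condition the matches do not show, and the direct-path register programming is elided:

static int vcn_v2_5_start_dispatch_sketch(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			/* dynamic power gating: stage register writes in SRAM */
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			if (r)
				return r;
			continue;
		}

		/* direct path: program the instance and start ring_dec,
		 * ring_enc[0] and ring_enc[1] (register writes elided) */
	}

	return 0;
}
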
1215 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_sriov_start()
1226 size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); in vcn_v2_5_sriov_start()
1244 lower_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v2_5_sriov_start()
1248 upper_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v2_5_sriov_start()
1261 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset)); in vcn_v2_5_sriov_start()
1265 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset)); in vcn_v2_5_sriov_start()
1275 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + in vcn_v2_5_sriov_start()
1280 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + in vcn_v2_5_sriov_start()
1289 ring = &adev->vcn.inst[i].ring_enc[0]; in vcn_v2_5_sriov_start()
1302 ring = &adev->vcn.inst[i].ring_dec; in vcn_v2_5_sriov_start()
1369 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_stop()
1370 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_stop()
1440 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { in vcn_v2_5_pause_dpg_mode()
1442 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); in vcn_v2_5_pause_dpg_mode()
1451 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; in vcn_v2_5_pause_dpg_mode()
1469 ring = &adev->vcn.inst[inst_idx].ring_enc[0]; in vcn_v2_5_pause_dpg_mode()
1479 ring = &adev->vcn.inst[inst_idx].ring_enc[1]; in vcn_v2_5_pause_dpg_mode()
1501 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; in vcn_v2_5_pause_dpg_mode()
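
The pause handler is a compare-and-update state machine: it only touches the hardware when the requested fw_based pause state differs from the one cached in inst[inst_idx].pause_state, restarts the encode rings as part of the transition, and writes the new state back at the end. A skeleton sketch of that flow; struct dpg_pause_state follows the fields shown in the matches, and the register handshake is left as a comment:

static int vcn_v2_5_pause_dpg_sketch(struct amdgpu_device *adev, int inst_idx,
				     struct dpg_pause_state *new_state)
{
	if (adev->vcn.inst[inst_idx].pause_state.fw_based == new_state->fw_based)
		return 0;	/* already in the requested state */

	DRM_DEBUG("dpg pause state change %d -> %d\n",
		  adev->vcn.inst[inst_idx].pause_state.fw_based,
		  new_state->fw_based);

	/* ... per-state register handshake and ring_enc[0]/ring_enc[1]
	 *     restart elided ... */

	adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;

	return 0;
}
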
1630 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) in vcn_v2_5_enc_ring_get_rptr()
1647 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { in vcn_v2_5_enc_ring_get_wptr()
1671 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { in vcn_v2_5_enc_ring_set_wptr()
1752 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_set_dec_ring_funcs()
1753 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_set_dec_ring_funcs()
1756 adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; in vcn_v2_5_set_dec_ring_funcs()
1758 adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs; in vcn_v2_5_set_dec_ring_funcs()
1759 adev->vcn.inst[i].ring_dec.me = i; in vcn_v2_5_set_dec_ring_funcs()
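
The ring-funcs setters pick a vtable per ASIC revision (the 2.5 vs 2.6 variants at lines 1756/1758) and stamp each ring with ring->me = i, which the rptr/wptr helpers at lines 1630-1671 use to find the owning instance in adev->vcn.inst[]. A sketch, with the revision check written as a hypothetical predicate since the actual condition is not in the matches:

static void vcn_v2_5_set_dec_ring_funcs_sketch(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* hypothetical revision check: 2.5 and 2.6 use different vtables */
		if (vcn_is_v2_5(adev))
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		else
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;

		/* remember the owning instance for the rptr/wptr helpers */
		adev->vcn.inst[i].ring_dec.me = i;
	}
}
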
1768 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in vcn_v2_5_set_enc_ring_funcs()
1769 if (adev->vcn.harvest_config & (1 << j)) in vcn_v2_5_set_enc_ring_funcs()
1771 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { in vcn_v2_5_set_enc_ring_funcs()
1773 adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; in vcn_v2_5_set_enc_ring_funcs()
1775 adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs; in vcn_v2_5_set_enc_ring_funcs()
1776 adev->vcn.inst[j].ring_enc[i].me = j; in vcn_v2_5_set_enc_ring_funcs()
1787 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_is_idle()
1788 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_is_idle()
1801 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_wait_for_idle()
1802 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_wait_for_idle()
1842 if(state == adev->vcn.cur_state) in vcn_v2_5_set_powergating_state()
1851 adev->vcn.cur_state = state; in vcn_v2_5_set_powergating_state()
1886 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec); in vcn_v2_5_process_interrupt()
1889 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); in vcn_v2_5_process_interrupt()
1892 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); in vcn_v2_5_process_interrupt()
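
vcn_v2_5_process_interrupt() fans the fence completion out by interrupt source: the system-message source goes to the decode ring while the two encode sources map to ring_enc[0] and ring_enc[1] of the signalling instance. A sketch of that dispatch; the first two source ids are the ones already used in vcn_v2_5_sw_init(), and the second encode source id is an assumption:

static int vcn_v2_5_process_interrupt_sketch(struct amdgpu_device *adev,
					     unsigned int ip_instance,
					     unsigned int src_id)
{
	switch (src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:	/* assumed second encode source */
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %u\n", src_id);
		break;
	}

	return 0;
}
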
1915 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v2_5_set_irq_funcs()
1916 if (adev->vcn.harvest_config & (1 << i)) in vcn_v2_5_set_irq_funcs()
1918 adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; in vcn_v2_5_set_irq_funcs()
1919 adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; in vcn_v2_5_set_irq_funcs()
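
Each instance registers its interrupt source with num_enc_rings + 1 types: one for the decode/system-message interrupt plus one per encode ring (so three types when num_enc_rings is 2). A short sketch of that loop using only fields shown in the matches:

static void vcn_v2_5_set_irq_funcs_sketch(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* one type for the decode ring plus one per encode ring */
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}
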
2007 for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++) in vcn_v2_6_query_poison_status()
2029 adev->vcn.ras = &vcn_v2_6_ras; in vcn_v2_5_set_ras_funcs()