Lines matching refs:vcn — each entry shows the source line number, the matching code, and the enclosing function.
132 adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; in vcn_v3_0_early_init()
133 adev->vcn.harvest_config = 0; in vcn_v3_0_early_init()
134 for (i = 0; i < adev->vcn.num_vcn_inst; i++) in vcn_v3_0_early_init()
135 adev->vcn.inst[i].num_enc_rings = 1; in vcn_v3_0_early_init()
138 if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | in vcn_v3_0_early_init()
143 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_early_init()
146 adev->vcn.inst[i].num_enc_rings = 0; in vcn_v3_0_early_init()
148 adev->vcn.inst[i].num_enc_rings = 2; in vcn_v3_0_early_init()
156 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_early_init()
157 adev->vcn.inst[i].set_pg_state = vcn_v3_0_set_pg_state; in vcn_v3_0_early_init()
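The vcn_v3_0_early_init() entries above show the per-ASIC bring-up pattern: the instance count is set to VCN_INSTANCES_SIENNA_CICHLID, the harvest bitmask is cleared, and every instance defaults to one encode ring before other code paths drop that to 0 or raise it to 2. A minimal standalone sketch of that pattern, with reduced struct fields and assumed constant values rather than the real amdgpu types, could look like this:

/* Sketch only: reduced copies of the fields referenced above, not the real
 * amdgpu structures. Constant values here are assumptions for illustration. */
#include <stdio.h>

#define VCN_INSTANCES_SIENNA_CICHLID 2   /* assumed: two VCN instances */
#define MAX_VCN_INSTANCES            4

struct vcn_inst_sketch {
	unsigned int num_enc_rings;
};

struct vcn_sketch {
	unsigned int num_vcn_inst;
	unsigned int harvest_config;     /* bit i set => instance i is harvested */
	struct vcn_inst_sketch inst[MAX_VCN_INSTANCES];
};

static void early_init_sketch(struct vcn_sketch *vcn)
{
	unsigned int i;

	vcn->num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
	vcn->harvest_config = 0;
	for (i = 0; i < vcn->num_vcn_inst; i++)
		vcn->inst[i].num_enc_rings = 1;   /* default; other paths use 0 or 2 */
}

int main(void)
{
	struct vcn_sketch vcn = { 0 };

	early_init_sketch(&vcn);
	printf("instances=%u enc_rings[0]=%u\n",
	       vcn.num_vcn_inst, vcn.inst[0].num_enc_rings);
	return 0;
}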
190 vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1; in vcn_v3_0_sw_init()
195 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_sw_init()
198 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_sw_init()
211 adev->vcn.inst[i].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
212 adev->vcn.inst[i].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
213 adev->vcn.inst[i].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
214 adev->vcn.inst[i].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
215 adev->vcn.inst[i].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
216 adev->vcn.inst[i].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
218 adev->vcn.inst[i].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
219 adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9); in vcn_v3_0_sw_init()
220 adev->vcn.inst[i].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
221 adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0); in vcn_v3_0_sw_init()
222 adev->vcn.inst[i].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
223 adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1); in vcn_v3_0_sw_init()
224 adev->vcn.inst[i].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
225 adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD); in vcn_v3_0_sw_init()
226 adev->vcn.inst[i].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; in vcn_v3_0_sw_init()
227 adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP); in vcn_v3_0_sw_init()
231 VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq); in vcn_v3_0_sw_init()
235 atomic_set(&adev->vcn.inst[i].sched_score, 0); in vcn_v3_0_sw_init()
237 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_sw_init()
240 ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1); in vcn_v3_0_sw_init()
242 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i; in vcn_v3_0_sw_init()
246 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, in vcn_v3_0_sw_init()
248 &adev->vcn.inst[i].sched_score); in vcn_v3_0_sw_init()
252 for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { in vcn_v3_0_sw_init()
257 j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq); in vcn_v3_0_sw_init()
261 ring = &adev->vcn.inst[i].ring_enc[j]; in vcn_v3_0_sw_init()
264 ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1) + 1 + j; in vcn_v3_0_sw_init()
266 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i; in vcn_v3_0_sw_init()
270 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, in vcn_v3_0_sw_init()
271 hw_prio, &adev->vcn.inst[i].sched_score); in vcn_v3_0_sw_init()
276 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v3_0_sw_init()
289 amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); in vcn_v3_0_sw_init()
292 adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode; in vcn_v3_0_sw_init()
293 adev->vcn.inst[i].reset = vcn_v3_0_reset; in vcn_v3_0_sw_init()
296 adev->vcn.supported_reset = in vcn_v3_0_sw_init()
297 amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); in vcn_v3_0_sw_init()
299 adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; in vcn_v3_0_sw_init()
308 ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); in vcn_v3_0_sw_init()
311 adev->vcn.ip_dump = NULL; in vcn_v3_0_sw_init()
313 adev->vcn.ip_dump = ptr; in vcn_v3_0_sw_init()
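The vcn_v3_0_sw_init() entries above contain two alternative doorbell layouts for the decode and encode rings: one derived from vcn_doorbell_index with a stride of (num_enc_rings + 1) doorbells per instance, and one derived from (vcn_ring0_1 << 1) with a fixed stride of 8 per instance. The condition selecting between them is not part of this listing (in the driver it depends on the execution environment, e.g. SR-IOV), so the sketch below simply evaluates both formulas side by side with made-up base values:

/* Illustrative sketch of the two doorbell-index formulas visible above.
 * The names and example base values are hypothetical stand-ins. */
#include <stdio.h>

/* Layout A: decode ring, then its encode rings, packed per instance. */
static unsigned int dec_doorbell_a(unsigned int base, unsigned int i, unsigned int num_enc)
{
	return base + i * (num_enc + 1);
}
static unsigned int enc_doorbell_a(unsigned int base, unsigned int i, unsigned int num_enc, unsigned int j)
{
	return base + i * (num_enc + 1) + 1 + j;
}

/* Layout B: fixed stride of 8 doorbells per instance from a doubled base. */
static unsigned int dec_doorbell_b(unsigned int ring0_1, unsigned int i)
{
	return (ring0_1 << 1) + 8 * i;
}
static unsigned int enc_doorbell_b(unsigned int ring0_1, unsigned int i, unsigned int j)
{
	return (ring0_1 << 1) + 2 + j + 8 * i;
}

int main(void)
{
	unsigned int base = 0x20, ring0_1 = 0x10, num_enc = 2, i, j;

	for (i = 0; i < 2; i++) {
		printf("inst %u dec: A=%u B=%u\n", i,
		       dec_doorbell_a(base, i, num_enc), dec_doorbell_b(ring0_1, i));
		for (j = 0; j < num_enc; j++)
			printf("inst %u enc%u: A=%u B=%u\n", i, j,
			       enc_doorbell_a(base, i, num_enc, j),
			       enc_doorbell_b(ring0_1, i, j));
	}
	return 0;
}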
336 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_sw_fini()
339 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_sw_fini()
341 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v3_0_sw_fini()
354 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_sw_fini()
364 kfree(adev->vcn.ip_dump); in vcn_v3_0_sw_fini()
387 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_hw_init()
388 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_hw_init()
391 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_hw_init()
403 for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { in vcn_v3_0_hw_init()
404 ring = &adev->vcn.inst[i].ring_enc[j]; in vcn_v3_0_hw_init()
418 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_hw_init()
419 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_hw_init()
422 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_hw_init()
431 for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { in vcn_v3_0_hw_init()
432 ring = &adev->vcn.inst[i].ring_enc[j]; in vcn_v3_0_hw_init()
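vcn_v3_0_hw_init(), like most functions in this listing, iterates over num_vcn_inst and skips any instance whose bit is set in harvest_config before touching its rings. A small sketch of that skip pattern, with a hypothetical test_ring() standing in for the per-ring initialization and test work the real code performs:

/* Sketch of the harvest-skip iteration used throughout the file. The bitmask
 * semantics come from the "harvest_config & (1 << i)" checks above;
 * test_ring() is a placeholder, not a real amdgpu helper. */
#include <stdio.h>

#define NUM_INST 2
#define NUM_ENC  2

static int test_ring(unsigned int inst, unsigned int ring)
{
	printf("testing instance %u ring %u\n", inst, ring);
	return 0;   /* 0 = success */
}

static int hw_init_sketch(unsigned int num_inst, unsigned int harvest_config)
{
	unsigned int i, j;
	int r;

	for (i = 0; i < num_inst; i++) {
		if (harvest_config & (1u << i))
			continue;            /* instance fused off: leave it alone */

		r = test_ring(i, 0);         /* decode ring */
		if (r)
			return r;
		for (j = 0; j < NUM_ENC; j++) {
			r = test_ring(i, 1 + j);   /* encode rings */
			if (r)
				return r;
		}
	}
	return 0;
}

int main(void)
{
	/* harvest bit 1 set: the second instance is skipped */
	return hw_init_sketch(NUM_INST, 1u << 1);
}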
455 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_hw_fini()
456 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; in vcn_v3_0_hw_fini()
458 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_hw_fini()
491 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_suspend()
512 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_resume()
534 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4); in vcn_v3_0_mc_resume()
547 lower_32_bits(adev->vcn.inst[inst].gpu_addr)); in vcn_v3_0_mc_resume()
549 upper_32_bits(adev->vcn.inst[inst].gpu_addr)); in vcn_v3_0_mc_resume()
558 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); in vcn_v3_0_mc_resume()
560 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); in vcn_v3_0_mc_resume()
566 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); in vcn_v3_0_mc_resume()
568 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); in vcn_v3_0_mc_resume()
574 lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); in vcn_v3_0_mc_resume()
576 upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); in vcn_v3_0_mc_resume()
587 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4); in vcn_v3_0_mc_resume_dpg_mode()
613 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
616 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
634 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
637 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
654 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
657 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
666 lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
669 upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); in vcn_v3_0_mc_resume_dpg_mode()
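Both vcn_v3_0_mc_resume() and vcn_v3_0_mc_resume_dpg_mode() program the same set of 64-bit addresses (the firmware cache base at gpu_addr, a region at gpu_addr + offset and at gpu_addr + offset + AMDGPU_VCN_STACK_SIZE, and the fw_shared buffer) as LOW/HIGH 32-bit register pairs. A minimal sketch of that split, assuming the usual lower_32_bits()/upper_32_bits() semantics and a hypothetical write_reg32() in place of the real register-write helpers:

/* Sketch of programming a 64-bit GPU address into a 32-bit register pair.
 * write_reg32() and the example addresses are assumptions for illustration. */
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

static void write_reg32(const char *name, uint32_t val)
{
	printf("%s <- 0x%08x\n", name, val);
}

int main(void)
{
	uint64_t gpu_addr = 0x0000008001234000ULL;  /* example VRAM address */
	uint64_t offset   = 0x10000;                /* e.g. past the firmware image */

	write_reg32("CACHE_64BIT_BAR_LOW",  lower_32_bits(gpu_addr + offset));
	write_reg32("CACHE_64BIT_BAR_HIGH", upper_32_bits(gpu_addr + offset));
	return 0;
}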
1042 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; in vcn_v3_0_start_dpg_mode()
1056 …adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_ad… in vcn_v3_0_start_dpg_mode()
1140 ring = &adev->vcn.inst[inst_idx].ring_dec; in vcn_v3_0_start_dpg_mode()
1206 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_start()
1331 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_start()
1341 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v3_0_start()
1363 ring = &adev->vcn.inst[i].ring_enc[0]; in vcn_v3_0_start()
1372 ring = &adev->vcn.inst[i].ring_enc[1]; in vcn_v3_0_start()
1429 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_start_sriov()
1430 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_start_sriov()
1439 cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4); in vcn_v3_0_start_sriov()
1455 lower_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v3_0_start_sriov()
1458 upper_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v3_0_start_sriov()
1469 cache_addr = adev->vcn.inst[i].gpu_addr + offset; in vcn_v3_0_start_sriov()
1483 cache_addr = adev->vcn.inst[i].gpu_addr + offset + in vcn_v3_0_start_sriov()
1498 for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { in vcn_v3_0_start_sriov()
1499 ring = &adev->vcn.inst[i].ring_enc[j]; in vcn_v3_0_start_sriov()
1513 ring = &adev->vcn.inst[i].ring_dec; in vcn_v3_0_start_sriov()
1643 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_stop()
1728 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { in vcn_v3_0_pause_dpg_mode()
1730 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); in vcn_v3_0_pause_dpg_mode()
1756 fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; in vcn_v3_0_pause_dpg_mode()
1758 ring = &adev->vcn.inst[inst_idx].ring_enc[0]; in vcn_v3_0_pause_dpg_mode()
1768 ring = &adev->vcn.inst[inst_idx].ring_enc[1]; in vcn_v3_0_pause_dpg_mode()
1794 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; in vcn_v3_0_pause_dpg_mode()
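vcn_v3_0_pause_dpg_mode() only reprograms the hardware when the requested fw_based pause state differs from the cached one, then records the new state (the entries at source lines 1728 and 1794 above). That idempotence guard reduces to the small pattern below; the enum values and the reprogram step are placeholders, not the driver's:

/* Sketch of the pause-state change guard. */
#include <stdio.h>

enum pause_state_sketch { PAUSE_NONE, PAUSE_FW_BASED };

struct inst_sketch {
	enum pause_state_sketch fw_based;
};

static void reprogram_pause(enum pause_state_sketch new_state)
{
	printf("reprogramming DPG pause mode to %d\n", new_state);
}

static void pause_dpg_sketch(struct inst_sketch *inst,
			     enum pause_state_sketch new_state)
{
	if (inst->fw_based == new_state)
		return;                     /* nothing to do */
	reprogram_pause(new_state);         /* ring restarts etc. happen here */
	inst->fw_based = new_state;         /* cache the new state */
}

int main(void)
{
	struct inst_sketch inst = { PAUSE_NONE };

	pause_dpg_sketch(&inst, PAUSE_FW_BASED);   /* programs once */
	pause_dpg_sketch(&inst, PAUSE_FW_BASED);   /* second call is a no-op */
	return 0;
}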
1845 fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr; in vcn_v3_0_dec_ring_set_wptr()
1897 if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) in vcn_v3_0_limit_sched()
2005 if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data0, 0)) { in vcn_v3_0_ring_patch_cs_in_place()
2007 } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data1, 0)) { in vcn_v3_0_ring_patch_cs_in_place()
2009 } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.cmd, 0) && in vcn_v3_0_ring_patch_cs_in_place()
2063 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) in vcn_v3_0_enc_ring_get_rptr()
2080 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { in vcn_v3_0_enc_ring_get_wptr()
2104 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { in vcn_v3_0_enc_ring_set_wptr()
2155 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_set_dec_ring_funcs()
2156 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_set_dec_ring_funcs()
2160 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs; in vcn_v3_0_set_dec_ring_funcs()
2162 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs; in vcn_v3_0_set_dec_ring_funcs()
2163 adev->vcn.inst[i].ring_dec.me = i; in vcn_v3_0_set_dec_ring_funcs()
2171 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_set_enc_ring_funcs()
2172 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_set_enc_ring_funcs()
2175 for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { in vcn_v3_0_set_enc_ring_funcs()
2176 adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs; in vcn_v3_0_set_enc_ring_funcs()
2177 adev->vcn.inst[i].ring_enc[j].me = i; in vcn_v3_0_set_enc_ring_funcs()
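vcn_v3_0_set_dec_ring_funcs() and vcn_v3_0_set_enc_ring_funcs() attach a function table to each ring and record the owning instance in ring->me; later callers (dec_ring_set_wptr, ring_patch_cs_in_place, the enc rptr/wptr helpers above) use me to index back into adev->vcn.inst[]. A cut-down sketch of that ops-table-plus-backpointer pattern, with illustrative types only:

/* Sketch of the funcs/me pattern: each ring carries an ops table and the
 * index of its owning instance. Not the real amdgpu_ring types. */
#include <stdio.h>

struct ring_sketch;

struct ring_funcs_sketch {
	void (*set_wptr)(struct ring_sketch *ring);
};

struct ring_sketch {
	const struct ring_funcs_sketch *funcs;
	unsigned int me;                 /* owning VCN instance */
};

struct inst_sketch {
	struct ring_sketch ring_dec;
};

static struct inst_sketch instances[2];

static void dec_set_wptr_sketch(struct ring_sketch *ring)
{
	/* ring->me lets the callback find its instance, as the real
	 * dec_ring_set_wptr does via adev->vcn.inst[ring->me] */
	printf("set_wptr on instance %u\n", ring->me);
}

static const struct ring_funcs_sketch dec_funcs = {
	.set_wptr = dec_set_wptr_sketch,
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < 2; i++) {
		instances[i].ring_dec.funcs = &dec_funcs;
		instances[i].ring_dec.me = i;
	}
	instances[1].ring_dec.funcs->set_wptr(&instances[1].ring_dec);
	return 0;
}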
2199 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_is_idle()
2200 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_is_idle()
2214 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_wait_for_idle()
2215 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_wait_for_idle()
2234 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_set_clockgating_state()
2235 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; in vcn_v3_0_set_clockgating_state()
2236 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_set_clockgating_state()
2310 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec); in vcn_v3_0_process_interrupt()
2313 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); in vcn_v3_0_process_interrupt()
2316 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); in vcn_v3_0_process_interrupt()
2336 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v3_0_set_irq_funcs()
2337 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_set_irq_funcs()
2340 adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1; in vcn_v3_0_set_irq_funcs()
2341 adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs; in vcn_v3_0_set_irq_funcs()
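vcn_v3_0_set_irq_funcs() registers num_enc_rings + 1 interrupt types per instance (one for the decode ring plus one per encode ring), and vcn_v3_0_process_interrupt() dispatches fence processing to ring_dec, ring_enc[0] or ring_enc[1] depending on the interrupt source. The numeric source ids are not part of this listing (sw_init only shows the VCN_2_0__SRCID__* names), so the sketch below uses a hypothetical enum for the dispatch:

/* Sketch of the interrupt-to-ring dispatch. Source-id values are invented. */
#include <stdio.h>

enum srcid_sketch {
	SRCID_SYSTEM_MESSAGE,     /* decode ring fence */
	SRCID_ENC_GENERAL,        /* encode ring 0 fence */
	SRCID_ENC_LOW_LATENCY,    /* encode ring 1 fence */
};

static void fence_process(const char *ring_name)
{
	printf("processing fences on %s\n", ring_name);
}

static int process_interrupt_sketch(enum srcid_sketch src)
{
	switch (src) {
	case SRCID_SYSTEM_MESSAGE:
		fence_process("ring_dec");
		break;
	case SRCID_ENC_GENERAL:
		fence_process("ring_enc[0]");
		break;
	case SRCID_ENC_LOW_LATENCY:
		fence_process("ring_enc[1]");
		break;
	default:
		return -1;   /* unhandled source */
	}
	return 0;
}

int main(void)
{
	return process_interrupt_sketch(SRCID_ENC_GENERAL);
}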
2353 if (!adev->vcn.ip_dump) in vcn_v3_0_print_ip_state()
2356 drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); in vcn_v3_0_print_ip_state()
2357 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_print_ip_state()
2358 if (adev->vcn.harvest_config & (1 << i)) { in vcn_v3_0_print_ip_state()
2364 is_powered = (adev->vcn.ip_dump[inst_off] & in vcn_v3_0_print_ip_state()
2371 adev->vcn.ip_dump[inst_off + j]); in vcn_v3_0_print_ip_state()
2386 if (!adev->vcn.ip_dump) in vcn_v3_0_dump_ip_state()
2389 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v3_0_dump_ip_state()
2390 if (adev->vcn.harvest_config & (1 << i)) in vcn_v3_0_dump_ip_state()
2395 adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS); in vcn_v3_0_dump_ip_state()
2396 is_powered = (adev->vcn.ip_dump[inst_off] & in vcn_v3_0_dump_ip_state()
2401 adev->vcn.ip_dump[inst_off + j] = in vcn_v3_0_dump_ip_state()
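The ip_dump buffer allocated in sw_init (a kcalloc of num_vcn_inst * reg_count u32s at source line 308) is filled in vcn_v3_0_dump_ip_state() and walked in vcn_v3_0_print_ip_state() via ip_dump[inst_off + j]. The listing does not show how inst_off is computed; the sketch below assumes the natural flat layout inst_off = i * reg_count, with read_reg() standing in for the RREG32_SOC15 reads:

/* Sketch of a flat per-instance register dump buffer. The inst_off formula
 * and read_reg() are assumptions for illustration. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_INST  2
#define REG_COUNT 4

static uint32_t read_reg(unsigned int inst, unsigned int reg)
{
	return (inst << 16) | reg;    /* fake register contents */
}

int main(void)
{
	uint32_t *ip_dump = calloc((size_t)NUM_INST * REG_COUNT, sizeof(*ip_dump));
	unsigned int i, j;

	if (!ip_dump)
		return 1;

	/* dump_ip_state(): snapshot every register of every instance */
	for (i = 0; i < NUM_INST; i++) {
		unsigned int inst_off = i * REG_COUNT;   /* assumed layout */

		for (j = 0; j < REG_COUNT; j++)
			ip_dump[inst_off + j] = read_reg(i, j);
	}

	/* print_ip_state(): walk the same layout */
	printf("num_instances:%d\n", NUM_INST);
	for (i = 0; i < NUM_INST; i++)
		for (j = 0; j < REG_COUNT; j++)
			printf("inst %u reg %u = 0x%08x\n", i, j,
			       ip_dump[i * REG_COUNT + j]);

	free(ip_dump);
	return 0;
}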