Lines Matching refs:vcn
Cross-reference listing for the identifier "vcn" in the AMDGPU VCN 4.0 driver (vcn_v4_0.c): each entry shows the source line number, the matching code, and the enclosing function.
121 adev->vcn.harvest_config = VCN_HARVEST_MMSCH; in vcn_v4_0_early_init()
122 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_early_init()
124 adev->vcn.harvest_config |= 1 << i; in vcn_v4_0_early_init()
130 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) in vcn_v4_0_early_init()
132 adev->vcn.inst[i].num_enc_rings = 1; in vcn_v4_0_early_init()
138 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_early_init()
139 adev->vcn.inst[i].set_pg_state = vcn_v4_0_set_pg_state; in vcn_v4_0_early_init()
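Taken together, the early_init() matches above are the harvest bookkeeping: harvest_config is a per-instance bitmask (with the MMSCH flag pre-set on what is assumed to be the SR-IOV path), and each disabled instance ORs in its own bit before the unified-queue count and power-gating hook are filled in. A minimal sketch of the pattern; vcn_instance_disabled() is a hypothetical stand-in for the driver's real disabled-instance query:

    /* Sketch only -- vcn_instance_disabled() is hypothetical, and the
     * MMSCH pre-set is assumed to sit on the SR-IOV path. */
    adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (vcn_instance_disabled(adev, i))
            adev->vcn.harvest_config |= 1 << i;  /* bit i <=> instance i */
    }

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
        adev->vcn.inst[i].num_enc_rings = 1;     /* one unified queue on VCN 4.0 */

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
        adev->vcn.inst[i].set_pg_state = vcn_v4_0_set_pg_state;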
153 fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; in vcn_v4_0_fw_shared_init()
169 amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]); in vcn_v4_0_fw_shared_init()
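fw_shared_init() maps the per-instance shared buffer through its CPU address and then optionally arms firmware logging. A sketch under stated assumptions: the unified-queue present flag and the amdgpu_vcnfw_log module parameter are assumed to gate this the way they do on other VCN generations.

    volatile struct amdgpu_vcn4_fw_shared *fw_shared;

    fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
    /* AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE is an assumed flag name */
    fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
    fw_shared->sq.is_enabled = 1;

    if (amdgpu_vcnfw_log)  /* module parameter, assumed gate */
        amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);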
189 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v4_0_sw_init()
190 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_sw_init()
205 atomic_set(&adev->vcn.inst[i].sched_score, 1); in vcn_v4_0_sw_init()
207 atomic_set(&adev->vcn.inst[i].sched_score, 0); in vcn_v4_0_sw_init()
211 VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq); in vcn_v4_0_sw_init()
217 VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq); in vcn_v4_0_sw_init()
221 ring = &adev->vcn.inst[i].ring_enc[0]; in vcn_v4_0_sw_init()
224 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * in vcn_v4_0_sw_init()
225 (adev->vcn.inst[i].num_enc_rings + 1) + 1; in vcn_v4_0_sw_init()
227 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i; in vcn_v4_0_sw_init()
231 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, in vcn_v4_0_sw_init()
232 AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score); in vcn_v4_0_sw_init()
239 adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode; in vcn_v4_0_sw_init()
242 adev->vcn.supported_reset = in vcn_v4_0_sw_init()
243 amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); in vcn_v4_0_sw_init()
245 adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; in vcn_v4_0_sw_init()
259 ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); in vcn_v4_0_sw_init()
262 adev->vcn.ip_dump = NULL; in vcn_v4_0_sw_init()
264 adev->vcn.ip_dump = ptr; in vcn_v4_0_sw_init()
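Two doorbell formulas appear at lines 224-227, and the register-dump buffer allocated at line 259 is a flat num_vcn_inst * reg_count array revisited by dump_ip_state() further down. A condensed sketch of the per-instance unified-ring bring-up, assuming amdgpu_sriov_vf() is the selector, with the MMSCH-style stride on the VF path and the fixed 8-doorbell stride on bare metal:

    ring = &adev->vcn.inst[i].ring_enc[0];
    ring->use_doorbell = true;
    if (amdgpu_sriov_vf(adev))
        /* VF: (enc rings + 1) doorbells per instance, offset by 1 */
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
            i * (adev->vcn.inst[i].num_enc_rings + 1) + 1;
    else
        /* bare metal: stride of 8 doorbells per instance */
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
            2 + 8 * i;

    r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
                         AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
    if (r)
        return r;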
287 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v4_0_sw_fini()
290 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_sw_fini()
293 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v4_0_sw_fini()
304 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v4_0_sw_fini()
312 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v4_0_sw_fini()
318 kfree(adev->vcn.ip_dump); in vcn_v4_0_sw_fini()
341 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_hw_init()
342 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_hw_init()
345 ring = &adev->vcn.inst[i].ring_enc[0]; in vcn_v4_0_hw_init()
352 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_hw_init()
353 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_hw_init()
356 ring = &adev->vcn.inst[i].ring_enc[0]; in vcn_v4_0_hw_init()
359 ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i); in vcn_v4_0_hw_init()
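On the bare-metal hw_init() path each live instance has its doorbell aperture programmed before the ring is smoke-tested. A sketch; the nbio vcn_doorbell_range() callback and amdgpu_ring_test_helper() are assumed from the common amdgpu pattern:

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;

        ring = &adev->vcn.inst[i].ring_enc[0];
        adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
            ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

        r = amdgpu_ring_test_helper(ring);  /* assumed smoke test */
        if (r)
            return r;
    }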
382 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_hw_fini()
383 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; in vcn_v4_0_hw_fini()
385 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_hw_fini()
420 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v4_0_suspend()
441 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v4_0_resume()
466 hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data; in vcn_v4_0_mc_resume()
479 lower_32_bits(adev->vcn.inst[inst].gpu_addr)); in vcn_v4_0_mc_resume()
481 upper_32_bits(adev->vcn.inst[inst].gpu_addr)); in vcn_v4_0_mc_resume()
489 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); in vcn_v4_0_mc_resume()
491 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); in vcn_v4_0_mc_resume()
497 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); in vcn_v4_0_mc_resume()
499 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); in vcn_v4_0_mc_resume()
505 lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); in vcn_v4_0_mc_resume()
507 upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); in vcn_v4_0_mc_resume()
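The mc_resume() writes above program three windows inside the instance's firmware BO plus the separate fw_shared buffer, each as a lower/upper 32-bit pair. The implied layout (assuming driver-loaded firmware, so the image sits at the start of the BO; register names per the VCN 4.0 headers):

    /*
     *   gpu_addr + 0                                  firmware image
     *   gpu_addr + offset                             VCPU stack
     *   gpu_addr + offset + AMDGPU_VCN_STACK_SIZE     VCPU context
     *   fw_shared.gpu_addr (separate buffer)          fw_shared region
     */
    WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                 lower_32_bits(adev->vcn.inst[inst].gpu_addr));
    WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                 upper_32_bits(adev->vcn.inst[inst].gpu_addr));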
528 hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data; in vcn_v4_0_mc_resume_dpg_mode()
554 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); in vcn_v4_0_mc_resume_dpg_mode()
557 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); in vcn_v4_0_mc_resume_dpg_mode()
575 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); in vcn_v4_0_mc_resume_dpg_mode()
578 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); in vcn_v4_0_mc_resume_dpg_mode()
595 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); in vcn_v4_0_mc_resume_dpg_mode()
598 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); in vcn_v4_0_mc_resume_dpg_mode()
607 lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); in vcn_v4_0_mc_resume_dpg_mode()
610 upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); in vcn_v4_0_mc_resume_dpg_mode()
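mc_resume_dpg_mode() repeats the same windowing but routes each write through the DPG scratch SRAM when indirect is set. One write from the sequence, using the WREG32_SOC15_DPG_MODE/SOC15_DPG_MODE_OFFSET helpers as they appear elsewhere in the VCN code:

    /* The trailing (0, indirect) pair selects SRAM-buffered programming
     * when indirect != 0; register name assumed per the pattern above. */
    WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
        VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
        lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);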
1012 volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; in vcn_v4_0_start_dpg_mode()
1026 adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; in vcn_v4_0_start_dpg_mode()
1100 ring = &adev->vcn.inst[inst_idx].ring_enc[0]; in vcn_v4_0_start_dpg_mode()
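Line 1026 rewinds the DPG scratch-SRAM write cursor so the indirect programming sequence starts from the top of the buffer. A sketch, assuming the rewind is gated on indirect mode:

    if (indirect)
        adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
            (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;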
1151 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_start()
1157 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v4_0_start()
1160 return vcn_v4_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram); in vcn_v4_0_start()
1287 ring = &adev->vcn.inst[i].ring_enc[0]; in vcn_v4_0_start()
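vcn_v4_0_start() dispatches per instance: harvested instances are skipped, and DPG-capable parts hand off to start_dpg_mode() with the instance's indirect_sram flag. A sketch of the dispatch at lines 1151-1160, assuming i is the instance index carried by vinst and that AMD_PG_SUPPORT_VCN_DPG gates the DPG path:

    if (adev->vcn.harvest_config & (1 << i))
        return 0;

    fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

    if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
        return vcn_v4_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

    /* ... otherwise the full (non-DPG) power-up runs, ending with the
     * ring_enc[0] programming at line 1287 ... */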
1383 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v4_0_start_sriov()
1384 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_start_sriov()
1396 cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4); in vcn_v4_0_start_sriov()
1412 lower_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v4_0_start_sriov()
1415 upper_32_bits(adev->vcn.inst[i].gpu_addr)); in vcn_v4_0_start_sriov()
1426 cache_addr = adev->vcn.inst[i].gpu_addr + offset; in vcn_v4_0_start_sriov()
1440 cache_addr = adev->vcn.inst[i].gpu_addr + offset + in vcn_v4_0_start_sriov()
1455 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v4_0_start_sriov()
1458 ring_enc = &adev->vcn.inst[i].ring_enc[0]; in vcn_v4_0_start_sriov()
1469 if (!(adev->vcn.harvest_config & (1 << 0))) { in vcn_v4_0_start_sriov()
1470 rb_setup->rb_info[0].rb_addr_lo = lower_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr); in vcn_v4_0_start_sriov()
1471 rb_setup->rb_info[0].rb_addr_hi = upper_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr); in vcn_v4_0_start_sriov()
1472 rb_setup->rb_info[0].rb_size = adev->vcn.inst[0].ring_enc[0].ring_size / 4; in vcn_v4_0_start_sriov()
1474 if (!(adev->vcn.harvest_config & (1 << 1))) { in vcn_v4_0_start_sriov()
1475 rb_setup->rb_info[2].rb_addr_lo = lower_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr); in vcn_v4_0_start_sriov()
1476 rb_setup->rb_info[2].rb_addr_hi = upper_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr); in vcn_v4_0_start_sriov()
1477 rb_setup->rb_info[2].rb_size = adev->vcn.inst[1].ring_enc[0].ring_size / 4; in vcn_v4_0_start_sriov()
1489 lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr)); in vcn_v4_0_start_sriov()
1492 upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr)); in vcn_v4_0_start_sriov()
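The MMSCH ring-buffer table uses a stride of two rb_info slots per instance, which is why instance 1 lands in rb_info[2] at lines 1475-1477. A generalized form equivalent to the two hard-coded blocks above:

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;
        rb_setup->rb_info[2 * i].rb_addr_lo =
            lower_32_bits(adev->vcn.inst[i].ring_enc[0].gpu_addr);
        rb_setup->rb_info[2 * i].rb_addr_hi =
            upper_32_bits(adev->vcn.inst[i].ring_enc[0].gpu_addr);
        rb_setup->rb_info[2 * i].rb_size =
            adev->vcn.inst[i].ring_enc[0].ring_size / 4;  /* size in dwords */
    }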
1619 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_stop()
1622 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; in vcn_v4_0_stop()
1714 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { in vcn_v4_0_pause_dpg_mode()
1716 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); in vcn_v4_0_pause_dpg_mode()
1742 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; in vcn_v4_0_pause_dpg_mode()
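pause_dpg_mode() caches the last pause state per instance: hardware is only reprogrammed on an actual fw_based transition, after which the cache is updated. A sketch, assuming the update at line 1742 sits inside the transition branch:

    if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
        /* ... program the DPG pause request and wait for the ack ... */
        adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
    }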
1759 if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) in vcn_v4_0_unified_ring_get_rptr()
1776 if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) in vcn_v4_0_unified_ring_get_wptr()
1796 if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) in vcn_v4_0_unified_ring_set_wptr()
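Each unified-ring accessor first checks it was handed ring_enc[0] of the instance recorded in ring->me, since VCN 4.0 exposes exactly one ring per instance. A sketch of the rptr accessor; the register name and error text are assumed from the common VCN pattern:

    static u64 vcn_v4_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
    {
        struct amdgpu_device *adev = ring->adev;

        if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
            DRM_ERROR("wrong ring id is identified in %s", __func__);

        return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
    }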
1817 if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) in vcn_v4_0_limit_sched()
1976 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; in vcn_v4_0_ring_reset()
2032 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_set_unified_ring_funcs()
2033 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_set_unified_ring_funcs()
2039 adev->vcn.inst[i].ring_enc[0].funcs = in vcn_v4_0_set_unified_ring_funcs()
2041 adev->vcn.inst[i].ring_enc[0].me = i; in vcn_v4_0_set_unified_ring_funcs()
2057 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_is_idle()
2058 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_is_idle()
2079 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_wait_for_idle()
2080 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_wait_for_idle()
2107 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_set_clockgating_state()
2108 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; in vcn_v4_0_set_clockgating_state()
2110 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_set_clockgating_state()
2206 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); in vcn_v4_0_process_interrupt()
2237 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in vcn_v4_0_set_irq_funcs()
2238 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_set_irq_funcs()
2241 adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1; in vcn_v4_0_set_irq_funcs()
2242 adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs; in vcn_v4_0_set_irq_funcs()
2244 adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1; in vcn_v4_0_set_irq_funcs()
2245 adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs; in vcn_v4_0_set_irq_funcs()
2256 if (!adev->vcn.ip_dump) in vcn_v4_0_print_ip_state()
2259 drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst); in vcn_v4_0_print_ip_state()
2260 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v4_0_print_ip_state()
2261 if (adev->vcn.harvest_config & (1 << i)) { in vcn_v4_0_print_ip_state()
2267 is_powered = (adev->vcn.ip_dump[inst_off] & in vcn_v4_0_print_ip_state()
2274 adev->vcn.ip_dump[inst_off + j]); in vcn_v4_0_print_ip_state()
2289 if (!adev->vcn.ip_dump) in vcn_v4_0_dump_ip_state()
2292 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in vcn_v4_0_dump_ip_state()
2293 if (adev->vcn.harvest_config & (1 << i)) in vcn_v4_0_dump_ip_state()
2298 adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS); in vcn_v4_0_dump_ip_state()
2299 is_powered = (adev->vcn.ip_dump[inst_off] & in vcn_v4_0_dump_ip_state()
2304 adev->vcn.ip_dump[inst_off + j] = in vcn_v4_0_dump_ip_state()
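Both dump_ip_state() and print_ip_state() index the flat ip_dump array allocated at line 259: one contiguous stripe of reg_count u32s per instance, with the power-status register in the stripe's first slot. An indexing sketch, assuming inst_off = i * reg_count:

    inst_off = i * reg_count;  /* assumed stripe offset */
    adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
    is_powered = (adev->vcn.ip_dump[inst_off] &
                  UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;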
2361 for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++) in vcn_v4_0_query_ras_poison_status()
2384 adev->vcn.ras = &vcn_v4_0_ras; in vcn_v4_0_set_ras_funcs()