Lines Matching refs:sdma
208 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v5_2_ring_insert_nop() local
212 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v5_2_ring_insert_nop()
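The two matches above are the burst-NOP path of sdma_v5_2_ring_insert_nop(): when the instance reports burst_nop, the first NOP written carries a count covering the rest of the padding so the engine can skip it in one packet. A hedged sketch of the whole helper, reconstructed around the matched lines (the amdgpu_ring_write() calls and the SDMA_PKT_NOP_HEADER_COUNT() packing are filled in by analogy with other SDMA generations, not shown in the matches):

    static void sdma_v5_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
    {
            struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
            int i;

            for (i = 0; i < count; i++)
                    if (sdma && sdma->burst_nop && (i == 0))
                            /* first NOP absorbs the remaining count - 1 dwords */
                            amdgpu_ring_write(ring, ring->funcs->nop |
                                              SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                    else
                            amdgpu_ring_write(ring, ring->funcs->nop);
    }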
369 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_gfx_stop()
428 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_ctx_switch_enable()
467 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_enable()
495 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_gfx_resume()
496 ring = &adev->sdma.instance[i].ring; in sdma_v5_2_gfx_resume()
671 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_load_microcode()
672 if (!adev->sdma.instance[i].fw) in sdma_v5_2_load_microcode()
675 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v5_2_load_microcode()
680 (adev->sdma.instance[i].fw->data + in sdma_v5_2_load_microcode()
691 WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version); in sdma_v5_2_load_microcode()
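The load_microcode matches outline the per-instance firmware load: bail out if an instance has no firmware blob, read the sdma_firmware_header_v1_0 at the start of the blob, stream the ucode words into the engine, then leave the firmware version in mmSDMA0_UCODE_ADDR. A hedged reconstruction of that loop (the header fields, the mmSDMA0_UCODE_DATA writes and the le32 conversions are assumptions carried over from other SDMA generations; only the lines quoted above are confirmed by the matches):

    /* inside sdma_v5_2_load_microcode(), after the engines are halted */
    for (i = 0; i < adev->sdma.num_instances; i++) {
            if (!adev->sdma.instance[i].fw)
                    return -EINVAL;

            hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
            fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
            fw_data = (const __le32 *)
                    (adev->sdma.instance[i].fw->data +
                     le32_to_cpu(hdr->header.ucode_array_offset_bytes));

            /* start loading at ucode address 0, stream the words, then publish the version */
            WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
            for (j = 0; j < fw_size; j++)
                    WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA),
                           le32_to_cpup(fw_data++));
            WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR),
                   adev->sdma.instance[i].fw_version);
    }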
704 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_soft_reset()
1085 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v5_2_ring_pad_ib() local
1091 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v5_2_ring_pad_ib()
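ring_pad_ib makes the same burst-NOP decision as insert_nop above, but the padding goes into the indirect buffer rather than straight onto the ring. A short sketch (the pad_count computation and the SDMA_PKT_HEADER_OP()/SDMA_OP_NOP packing are assumptions from neighbouring SDMA code):

    /* inside sdma_v5_2_ring_pad_ib(): pad the IB up to the ring's alignment */
    pad_count = (-ib->length_dw) & ring->funcs->align_mask;
    for (i = 0; i < pad_count; i++)
            if (sdma && sdma->burst_nop && (i == 0))
                    ib->ptr[ib->length_dw++] =
                            SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
                            SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
            else
                    ib->ptr[ib->length_dw++] =
                            SDMA_PKT_HEADER_OP(SDMA_OP_NOP);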
1230 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_sw_init()
1233 &adev->sdma.trap_irq); in sdma_v5_2_sw_init()
1244 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_sw_init()
1245 ring = &adev->sdma.instance[i].ring; in sdma_v5_2_sw_init()
1257 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, in sdma_v5_2_sw_init()
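sw_init first registers one trap source per instance (the loop at 1230-1233) and then initializes every instance's ring against that shared adev->sdma.trap_irq. A hedged sketch of the ring-init half of the loop (the doorbell/name setup and the exact amdgpu_ring_init() signature vary between kernel versions and are assumptions here):

    for (i = 0; i < adev->sdma.num_instances; i++) {
            ring = &adev->sdma.instance[i].ring;
            ring->use_doorbell = true;
            ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
            ring->me = i;

            sprintf(ring->name, "sdma%d", i);
            r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
                                 AMDGPU_SDMA_IRQ_INSTANCE0 + i,
                                 AMDGPU_RING_PRIO_DEFAULT, NULL);
            if (r)
                    return r;
    }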
1272 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v5_2_sw_fini()
1273 amdgpu_ring_fini(&adev->sdma.instance[i].ring); in sdma_v5_2_sw_fini()
1322 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_is_idle()
1441 amdgpu_fence_process(&adev->sdma.instance[0].ring); in sdma_v5_2_process_trap_irq()
1457 amdgpu_fence_process(&adev->sdma.instance[1].ring); in sdma_v5_2_process_trap_irq()
1473 amdgpu_fence_process(&adev->sdma.instance[2].ring); in sdma_v5_2_process_trap_irq()
1489 amdgpu_fence_process(&adev->sdma.instance[3].ring); in sdma_v5_2_process_trap_irq()
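The trap handler fans out to one amdgpu_fence_process() call per instance, keyed on which SDMA client raised the interrupt. A condensed sketch of that dispatch (the client-id switch and the inner ring_id check are assumptions; only the four fence-processing calls are confirmed by the matches):

    /* inside sdma_v5_2_process_trap_irq() */
    switch (entry->client_id) {
    case SOC15_IH_CLIENTID_SDMA0:
            if (entry->ring_id == 0)        /* gfx queue of instance 0 */
                    amdgpu_fence_process(&adev->sdma.instance[0].ring);
            break;
    case SOC15_IH_CLIENTID_SDMA1:
            if (entry->ring_id == 0)
                    amdgpu_fence_process(&adev->sdma.instance[1].ring);
            break;
    case SOC15_IH_CLIENTID_SDMA2:
            if (entry->ring_id == 0)
                    amdgpu_fence_process(&adev->sdma.instance[2].ring);
            break;
    case SOC15_IH_CLIENTID_SDMA3:
            if (entry->ring_id == 0)
                    amdgpu_fence_process(&adev->sdma.instance[3].ring);
            break;
    }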
1519 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_update_medium_grain_clock_gating()
1521 …if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2,… in sdma_v5_2_update_medium_grain_clock_gating()
1556 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_update_medium_grain_light_sleep()
1558 …if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2,… in sdma_v5_2_update_medium_grain_light_sleep()
1692 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_set_ring_funcs()
1693 adev->sdma.instance[i].ring.funcs = &sdma_v5_2_ring_funcs; in sdma_v5_2_set_ring_funcs()
1694 adev->sdma.instance[i].ring.me = i; in sdma_v5_2_set_ring_funcs()
1709 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + in sdma_v5_2_set_irq_funcs()
1710 adev->sdma.num_instances; in sdma_v5_2_set_irq_funcs()
1711 adev->sdma.trap_irq.funcs = &sdma_v5_2_trap_irq_funcs; in sdma_v5_2_set_irq_funcs()
1712 adev->sdma.illegal_inst_irq.funcs = &sdma_v5_2_illegal_inst_irq_funcs; in sdma_v5_2_set_irq_funcs()
1781 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v5_2_set_buffer_funcs()
1798 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_set_vm_pte_funcs()
1800 &adev->sdma.instance[i].ring.sched; in sdma_v5_2_set_vm_pte_funcs()
1802 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; in sdma_v5_2_set_vm_pte_funcs()
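The vm_pte setup hands every SDMA instance's GPU scheduler to the VM page-table manager, so page-table updates can be spread across all engines. A sketch of the whole function (the sdma_v5_2_vm_pte_funcs table assignment is an assumption; the scheduler loop and the num_scheds assignment follow the matched lines):

    static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
    {
            unsigned int i;

            adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs;
            for (i = 0; i < adev->sdma.num_instances; i++) {
                    adev->vm_manager.vm_pte_scheds[i] =
                            &adev->sdma.instance[i].ring.sched;
            }
            adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
    }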