Lines matching refs:sdma
245 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_init_microcode()
403 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v5_0_ring_insert_nop() local
407 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v5_0_ring_insert_nop()
566 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_gfx_stop()
625 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_ctx_switch_enable()
667 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_enable()
694 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_gfx_resume()
695 ring = &adev->sdma.instance[i].ring; in sdma_v5_0_gfx_resume()
871 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_load_microcode()
872 if (!adev->sdma.instance[i].fw) in sdma_v5_0_load_microcode()
875 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v5_0_load_microcode()
880 (adev->sdma.instance[i].fw->data + in sdma_v5_0_load_microcode()
891 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version); in sdma_v5_0_load_microcode()
1249 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v5_0_ring_pad_ib() local
1255 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v5_0_ring_pad_ib()
1363 &adev->sdma.trap_irq); in sdma_v5_0_sw_init()
1370 &adev->sdma.trap_irq); in sdma_v5_0_sw_init()
1380 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_sw_init()
1381 ring = &adev->sdma.instance[i].ring; in sdma_v5_0_sw_init()
1393 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, in sdma_v5_0_sw_init()
1409 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v5_0_sw_fini()
1410 amdgpu_ring_fini(&adev->sdma.instance[i].ring); in sdma_v5_0_sw_fini()
1464 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_is_idle()
1593 amdgpu_fence_process(&adev->sdma.instance[0].ring); in sdma_v5_0_process_trap_irq()
1609 amdgpu_fence_process(&adev->sdma.instance[1].ring); in sdma_v5_0_process_trap_irq()
1639 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_update_medium_grain_clock_gating()
1676 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_update_medium_grain_light_sleep()
1804 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_set_ring_funcs()
1805 adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs; in sdma_v5_0_set_ring_funcs()
1806 adev->sdma.instance[i].ring.me = i; in sdma_v5_0_set_ring_funcs()
1821 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + in sdma_v5_0_set_irq_funcs()
1822 adev->sdma.num_instances; in sdma_v5_0_set_irq_funcs()
1823 adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs; in sdma_v5_0_set_irq_funcs()
1824 adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs; in sdma_v5_0_set_irq_funcs()
1893 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v5_0_set_buffer_funcs()
1910 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_set_vm_pte_funcs()
1912 &adev->sdma.instance[i].ring.sched; in sdma_v5_0_set_vm_pte_funcs()
1914 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; in sdma_v5_0_set_vm_pte_funcs()
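Taken together, the hits share one idiom: loop over adev->sdma.num_instances and index adev->sdma.instance[i] to reach each instance's ring (see sdma_v5_0_set_ring_funcs(), where ring.me is set to the instance index). The sketch below is a minimal, self-contained model of that iteration pattern only; every type and name in it (dev_ctx, sdma_inst, set_ring_indices) is a simplified hypothetical stand-in, not the real amdgpu definitions.

    /*
     * Minimal sketch of the per-instance iteration idiom seen in the
     * matches above. All structs here are hypothetical stand-ins for
     * the amdgpu types, kept just large enough to show the pattern.
     */
    #include <stdio.h>

    #define MAX_SDMA_INSTANCES 2

    struct ring {                   /* stand-in for struct amdgpu_ring */
            int me;                 /* instance index, as assigned in
                                     * sdma_v5_0_set_ring_funcs() above */
    };

    struct sdma_inst {              /* stand-in for struct amdgpu_sdma_instance */
            struct ring ring;
    };

    struct dev_ctx {                /* stand-in for the sdma block of
                                     * struct amdgpu_device */
            int num_instances;
            struct sdma_inst instance[MAX_SDMA_INSTANCES];
    };

    /* Mirrors the set_ring_funcs-style loop: visit every instance's ring. */
    static void set_ring_indices(struct dev_ctx *adev)
    {
            int i;

            for (i = 0; i < adev->num_instances; i++)
                    adev->instance[i].ring.me = i;
    }

    int main(void)
    {
            struct dev_ctx adev = { .num_instances = 2 };
            int i;

            set_ring_indices(&adev);
            for (i = 0; i < adev.num_instances; i++)
                    printf("sdma instance %d -> ring.me = %d\n",
                           i, adev.instance[i].ring.me);
            return 0;
    }

The same shape recurs in the gfx_stop, ctx_switch_enable, enable, gfx_resume, load_microcode, sw_init/sw_fini, clock-gating, and vm_pte_funcs hits: one bounded loop, one per-instance struct access.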