Lines Matching refs:sdma

225 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);  in sdma_v6_0_ring_insert_nop()  local
229 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v6_0_ring_insert_nop()
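The two fragments above (lines 225 and 229) belong to the ring NOP-padding path. A minimal sketch of how they likely fit together, assuming the usual amdgpu burst-NOP pattern (amdgpu_ring_write() and SDMA_PKT_NOP_HEADER_COUNT() as used by the other SDMA generations); this is a reconstruction, not verbatim from the file:

	static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
	{
		struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
		int i;

		for (i = 0; i < count; i++)
			if (sdma && sdma->burst_nop && (i == 0))
				/* fold the whole run into one burst NOP packet */
				amdgpu_ring_write(ring, ring->funcs->nop |
					SDMA_PKT_NOP_HEADER_COUNT(count - 1));
			else
				amdgpu_ring_write(ring, ring->funcs->nop);
	}
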
383 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_gfx_stop()
438 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_enable()
464 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_gfx_resume()
465 ring = &adev->sdma.instance[i].ring; in sdma_v6_0_gfx_resume()
537 adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances); in sdma_v6_0_gfx_resume()
632 if (!adev->sdma.instance[0].fw) in sdma_v6_0_load_microcode()
641 hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data; in sdma_v6_0_load_microcode()
646 (adev->sdma.instance[0].fw->data + in sdma_v6_0_load_microcode()
661 (adev->sdma.instance[0].fw->data + in sdma_v6_0_load_microcode()
673 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_load_microcode()
675 hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data; in sdma_v6_0_load_microcode()
680 (adev->sdma.instance[0].fw->data + in sdma_v6_0_load_microcode()
691 … WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version); in sdma_v6_0_load_microcode()
697 (adev->sdma.instance[0].fw->data + in sdma_v6_0_load_microcode()
708 … WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version); in sdma_v6_0_load_microcode()
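Lines 632 through 708 all fall inside sdma_v6_0_load_microcode(). A hedged sketch of the per-instance branch suggested by lines 673-708, where every engine is programmed with the instance[0] firmware image; the helper name, the ctx_ucode_size_bytes field, and regSDMA0_UCODE_DATA are assumptions based on the v2_0 firmware header layout, not taken verbatim from the file:

	/* Sketch only: stream the instance[0] image into each SDMA engine. */
	static void sdma_v6_0_load_microcode_per_instance_sketch(struct amdgpu_device *adev)
	{
		const struct sdma_firmware_header_v2_0 *hdr;
		const __le32 *fw_data;
		u32 fw_size;
		int i, j;

		for (i = 0; i < adev->sdma.num_instances; i++) {
			hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
			fw_size = le32_to_cpu(hdr->ctx_ucode_size_bytes) / 4;	/* assumed field */
			fw_data = (const __le32 *)
				(adev->sdma.instance[0].fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

			/* reset the ucode write address, stream the words, latch the version */
			WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0);
			for (j = 0; j < fw_size; j++)
				WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA),
				       le32_to_cpup(fw_data++));
			WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR),
			       adev->sdma.instance[0].fw_version);
		}
	}
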
723 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_soft_reset()
758 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_check_soft_reset()
759 ring = &adev->sdma.instance[i].ring; in sdma_v6_0_check_soft_reset()
1125 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v6_0_ring_pad_ib() local
1131 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v6_0_ring_pad_ib()
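Lines 1125 and 1131 mirror the NOP-insert path above, but pad an indirect buffer to an 8-dword boundary instead of the ring. A sketch under the same burst-NOP assumption (SDMA_PKT_HEADER_OP/SDMA_OP_NOP as in the other SDMA drivers):

	static void sdma_v6_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
	{
		struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
		u32 pad_count;
		int i;

		/* pad the IB up to the next multiple of 8 dwords */
		pad_count = (-ib->length_dw) & 0x7;
		for (i = 0; i < pad_count; i++)
			if (sdma && sdma->burst_nop && (i == 0))
				ib->ptr[ib->length_dw++] =
					SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
					SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
			else
				ib->ptr[ib->length_dw++] =
					SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	}
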
1222 adev->sdma.ras = &sdma_v6_0_3_ras; in sdma_v6_0_set_ras_funcs()
1253 &adev->sdma.trap_irq); in sdma_v6_0_sw_init()
1263 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_sw_init()
1264 ring = &adev->sdma.instance[i].ring; in sdma_v6_0_sw_init()
1277 &adev->sdma.trap_irq, in sdma_v6_0_sw_init()
1297 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v6_0_sw_fini()
1298 amdgpu_ring_fini(&adev->sdma.instance[i].ring); in sdma_v6_0_sw_fini()
1347 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_is_idle()
1473 amdgpu_fence_process(&adev->sdma.instance[instances].ring); in sdma_v6_0_process_trap_irq()
1566 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_set_ring_funcs()
1567 adev->sdma.instance[i].ring.funcs = &sdma_v6_0_ring_funcs; in sdma_v6_0_set_ring_funcs()
1568 adev->sdma.instance[i].ring.me = i; in sdma_v6_0_set_ring_funcs()
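Lines 1566-1568 already show most of sdma_v6_0_set_ring_funcs(): the loop binds the shared ring function table and records each ring's engine index ("me"). A sketch of the enclosing function, using the sdma_v6_0_ring_funcs name visible at line 1567:

	static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev)
	{
		int i;

		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->sdma.instance[i].ring.funcs = &sdma_v6_0_ring_funcs;
			adev->sdma.instance[i].ring.me = i;
		}
	}
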
1583 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + in sdma_v6_0_set_irq_funcs()
1584 adev->sdma.num_instances; in sdma_v6_0_set_irq_funcs()
1585 adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs; in sdma_v6_0_set_irq_funcs()
1586 adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs; in sdma_v6_0_set_irq_funcs()
1654 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v6_0_set_buffer_funcs()
1669 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_set_vm_pte_funcs()
1671 &adev->sdma.instance[i].ring.sched; in sdma_v6_0_set_vm_pte_funcs()
1673 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; in sdma_v6_0_set_vm_pte_funcs()
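The final cluster (lines 1669-1673) registers each SDMA instance's GPU scheduler for VM page-table updates. A sketch of the enclosing function, assuming the conventional sdma_v6_0_vm_pte_funcs table name (not shown in the matches above):

	static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev)
	{
		unsigned i;

		adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			/* expose every SDMA ring's scheduler for page-table work */
			adev->vm_manager.vm_pte_scheds[i] =
				&adev->sdma.instance[i].ring.sched;
		}
		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
	}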