Lines matching refs:sdma (each hit: file line number, source text, enclosing function)
247 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v7_0_ring_insert_nop() local
251 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v7_0_ring_insert_nop()
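The hits at 247/251 (and their twins at 1165/1171 in sdma_v7_0_ring_pad_ib below) are the burst-NOP optimization: when the microcode advertises burst_nop, the first NOP dword encodes the remaining count in its header so the engine consumes the whole run as one packet. A minimal standalone sketch of that pattern, using simplified stand-in types and a hypothetical header layout rather than the real amdgpu structures and SDMA_PKT_NOP_HEADER_COUNT macro:

/* burst_nop_sketch.c - hedged illustration, not the real amdgpu code */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical bit layout; stands in for SDMA_PKT_NOP_HEADER_COUNT */
#define NOP_HEADER_COUNT(n) (((uint32_t)(n) & 0x3fff) << 16)

static void ring_write(uint32_t dw)
{
	printf("emit 0x%08x\n", dw);	/* stands in for amdgpu_ring_write() */
}

static void insert_nop(bool burst_nop, uint32_t nop_opcode, uint32_t count)
{
	for (uint32_t i = 0; i < count; i++)
		if (burst_nop && i == 0)
			/* first dword claims the remaining count - 1 dwords as payload */
			ring_write(nop_opcode | NOP_HEADER_COUNT(count - 1));
		else
			ring_write(nop_opcode);
}

int main(void)
{
	insert_nop(true, 0x0, 4);	/* one burst packet covering 4 dwords */
	insert_nop(false, 0x0, 4);	/* four individual NOP packets */
	return 0;
}

Either way the same number of dwords is written, so ring padding stays exact; the burst form just costs the engine one packet decode instead of four.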
403 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_gfx_stop()
458 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_enable()
486 ring = &adev->sdma.instance[i].ring; in sdma_v7_0_gfx_resume_instance()
563 adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances); in sdma_v7_0_gfx_resume_instance()
637 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_gfx_resume()
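The hits at 403, 458, 486/563 and 637 all share one shape, which recurs below at 760, 795, 1315, 1449 and elsewhere: iterate adev->sdma.num_instances and operate on adev->sdma.instance[i]. A hedged standalone sketch of that loop, with mock types in place of amdgpu_device, mirroring how sdma_v7_0_gfx_resume() stops at the first per-instance failure:

/* per_instance_sketch.c - hedged sketch of the num_instances iteration */
#include <stdio.h>

#define MAX_SDMA_INSTANCES 2	/* stand-in bound, not the real limit */

struct mock_sdma_instance { int ready; };
struct mock_adev {
	int num_instances;
	struct mock_sdma_instance instance[MAX_SDMA_INSTANCES];
};

static int gfx_resume_instance(struct mock_adev *adev, int i)
{
	adev->instance[i].ready = 1;	/* the real code programs ring registers here */
	return 0;
}

/* mirrors the shape of sdma_v7_0_gfx_resume(): bail on the first failure */
static int gfx_resume(struct mock_adev *adev)
{
	for (int i = 0; i < adev->num_instances; i++) {
		int r = gfx_resume_instance(adev, i);
		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	struct mock_adev adev = { .num_instances = 2 };
	return gfx_resume(&adev);
}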
664 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v12_0_free_ucode_buffer()
665 amdgpu_bo_free_kernel(&adev->sdma.instance[i].sdma_fw_obj, in sdma_v12_0_free_ucode_buffer()
666 &adev->sdma.instance[i].sdma_fw_gpu_addr, in sdma_v12_0_free_ucode_buffer()
667 (void **)&adev->sdma.instance[i].sdma_fw_ptr); in sdma_v12_0_free_ucode_buffer()
690 if (!adev->sdma.instance[0].fw) in sdma_v7_0_load_microcode()
694 adev->sdma.instance[0].fw->data; in sdma_v7_0_load_microcode()
697 fw_data = (const __le32 *)(adev->sdma.instance[0].fw->data + in sdma_v7_0_load_microcode()
701 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_load_microcode()
705 &adev->sdma.instance[i].sdma_fw_obj, in sdma_v7_0_load_microcode()
706 &adev->sdma.instance[i].sdma_fw_gpu_addr, in sdma_v7_0_load_microcode()
707 (void **)&adev->sdma.instance[i].sdma_fw_ptr); in sdma_v7_0_load_microcode()
713 memcpy(adev->sdma.instance[i].sdma_fw_ptr, fw_data, fw_size); in sdma_v7_0_load_microcode()
715 amdgpu_bo_kunmap(adev->sdma.instance[i].sdma_fw_obj); in sdma_v7_0_load_microcode()
716 amdgpu_bo_unreserve(adev->sdma.instance[i].sdma_fw_obj); in sdma_v7_0_load_microcode()
723 lower_32_bits(adev->sdma.instance[i].sdma_fw_gpu_addr)); in sdma_v7_0_load_microcode()
725 upper_32_bits(adev->sdma.instance[i].sdma_fw_gpu_addr)); in sdma_v7_0_load_microcode()
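The hits at 690-725 show the microcode load path: validate that the shared instance-0 firmware image exists, create one GPU buffer per instance, memcpy the image in, then split the buffer's 64-bit GPU address across lo/hi registers; the hits at 664-667 (sdma_v12_0_free_ucode_buffer, as named in the source) are the matching teardown. A hedged sketch of that lifecycle, with malloc/free standing in for amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() and a fake register writer:

/* ucode_buffer_sketch.c - hedged sketch of the load/free pairing */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mock_fw_buf {
	void *cpu_ptr;		/* kernel mapping, like sdma_fw_ptr */
	uint64_t gpu_addr;	/* GPU VA, like sdma_fw_gpu_addr */
};

static int fw_buf_create(struct mock_fw_buf *buf, size_t size)
{
	buf->cpu_ptr = malloc(size);	/* stands in for amdgpu_bo_create_kernel() */
	buf->gpu_addr = (uintptr_t)buf->cpu_ptr;
	return buf->cpu_ptr ? 0 : -1;
}

static void fw_buf_free(struct mock_fw_buf *buf)
{
	free(buf->cpu_ptr);		/* stands in for amdgpu_bo_free_kernel() */
	buf->cpu_ptr = NULL;
	buf->gpu_addr = 0;
}

static void write_reg(const char *name, uint32_t v)
{
	printf("%s <- 0x%08x\n", name, v);
}

int main(void)
{
	const uint32_t fw_data[4] = { 1, 2, 3, 4 };	/* stands in for instance[0].fw->data */
	struct mock_fw_buf buf;

	if (fw_buf_create(&buf, sizeof(fw_data)))
		return 1;
	memcpy(buf.cpu_ptr, fw_data, sizeof(fw_data));
	/* lines 723/725: the 64-bit GPU address goes into two 32-bit registers */
	write_reg("UCODE_ADDR_LO", (uint32_t)(buf.gpu_addr & 0xffffffff));
	write_reg("UCODE_ADDR_HI", (uint32_t)(buf.gpu_addr >> 32));
	fw_buf_free(&buf);
	return 0;
}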
760 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_soft_reset()
795 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_check_soft_reset()
796 ring = &adev->sdma.instance[i].ring; in sdma_v7_0_check_soft_reset()
812 if (ring->me >= adev->sdma.num_instances) { in sdma_v7_0_reset_queue()
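The hit at 812 is a defensive bounds check: per-queue reset refuses to touch a ring whose instance index (ring->me) is outside [0, num_instances). A hedged sketch of the guard, with mock types:

/* reset_queue_sketch.c - hedged sketch of the ring->me bounds check */
#include <errno.h>
#include <stdio.h>

struct mock_ring { unsigned int me; };

static int reset_queue(struct mock_ring *ring, unsigned int num_instances)
{
	/* mirrors line 812: reject a ring whose instance id is out of range */
	if (ring->me >= num_instances) {
		fprintf(stderr, "sdma instance out of range\n");
		return -EINVAL;
	}
	/* the real code goes on to stop, reset and restart instance[ring->me] */
	return 0;
}

int main(void)
{
	struct mock_ring ring = { .me = 3 };
	return reset_queue(&ring, 2) ? 0 : 1;	/* expect the guard to trip */
}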
1165 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v7_0_ring_pad_ib() local
1171 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v7_0_ring_pad_ib()
1265 adev->sdma.no_user_submission = false; in sdma_v7_0_early_init()
1266 adev->sdma.disable_uq = true; in sdma_v7_0_early_init()
1269 adev->sdma.no_user_submission = false; in sdma_v7_0_early_init()
1270 adev->sdma.disable_uq = false; in sdma_v7_0_early_init()
1273 adev->sdma.no_user_submission = true; in sdma_v7_0_early_init()
1274 adev->sdma.disable_uq = false; in sdma_v7_0_early_init()
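The hits at 1265-1274 set two capability flags in early_init, in three distinct combinations depending on which branch is taken (the source switches on the detected IP revision). A hedged sketch of that selection, with a hypothetical revision enum in place of the real IP_VERSION() cases; the per-revision meanings in the comments are inferences from the flag names, not confirmed by the listing:

/* early_init_sketch.c - hedged sketch of per-revision capability flags */
#include <stdbool.h>
#include <stdio.h>

/* hypothetical revision tags; the real code switches on amdgpu_ip_version() */
enum sdma_rev { REV_A, REV_B, REV_C };

struct mock_sdma {
	bool no_user_submission;	/* kernel ring refuses user submissions */
	bool disable_uq;		/* user queues (UQ) are not exposed */
};

static void early_init(struct mock_sdma *sdma, enum sdma_rev rev)
{
	switch (rev) {
	case REV_A:	/* lines 1265-1266: kernel submission, UQ off */
		sdma->no_user_submission = false;
		sdma->disable_uq = true;
		break;
	case REV_B:	/* lines 1269-1270: both paths available */
		sdma->no_user_submission = false;
		sdma->disable_uq = false;
		break;
	case REV_C:	/* lines 1273-1274: UQ-only, kernel ring rejects user work */
		sdma->no_user_submission = true;
		sdma->disable_uq = false;
		break;
	}
}

int main(void)
{
	struct mock_sdma sdma;
	early_init(&sdma, REV_C);
	printf("no_user_submission=%d disable_uq=%d\n",
	       sdma.no_user_submission, sdma.disable_uq);
	return 0;
}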
1304 &adev->sdma.trap_irq); in sdma_v7_0_sw_init()
1311 &adev->sdma.fence_irq); in sdma_v7_0_sw_init()
1315 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_sw_init()
1316 ring = &adev->sdma.instance[i].ring; in sdma_v7_0_sw_init()
1320 ring->no_user_submission = adev->sdma.no_user_submission; in sdma_v7_0_sw_init()
1331 &adev->sdma.trap_irq, in sdma_v7_0_sw_init()
1338 adev->sdma.supported_reset = in sdma_v7_0_sw_init()
1339 amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); in sdma_v7_0_sw_init()
1341 adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; in sdma_v7_0_sw_init()
1347 ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL); in sdma_v7_0_sw_init()
1349 adev->sdma.ip_dump = ptr; in sdma_v7_0_sw_init()
1356 if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq) in sdma_v7_0_sw_init()
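The hits at 1304-1356 trace the sw_init flow: register the trap and fence IRQ sources, initialize one kernel ring per instance (propagating the no_user_submission flag), derive supported_reset from instance 0 and OR in per-queue reset, kcalloc a register-dump scratch area of num_instances * reg_count dwords, and finally gate user queues on a minimum firmware version (7966358, from line 1356). A hedged sketch of the last two steps, with a stand-in register count:

/* sw_init_sketch.c - hedged sketch of the ip_dump allocation and UQ gate */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define REG_COUNT 16			/* stand-in for the real register list size */
#define MIN_UQ_FW_VERSION 7966358u	/* threshold quoted at line 1356 */

int main(void)
{
	unsigned int num_instances = 2;
	uint32_t fw_version = 7966400;	/* hypothetical loaded firmware version */
	bool disable_uq = false;

	/* line 1347: one flat array, REG_COUNT dwords per instance */
	uint32_t *ip_dump = calloc((size_t)num_instances * REG_COUNT,
				   sizeof(uint32_t));
	if (!ip_dump)
		return 1;

	/* line 1356: user queues only with new-enough firmware and not disabled */
	bool enable_uq = (fw_version >= MIN_UQ_FW_VERSION) && !disable_uq;
	printf("user queues: %s\n", enable_uq ? "enabled" : "disabled");

	free(ip_dump);
	return 0;
}

Keeping the dump buffer flat means the later dump/print code (lines 1624-1663) only needs an instance_offset multiply to find any instance's block.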
1371 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v7_0_sw_fini()
1372 amdgpu_ring_fini(&adev->sdma.instance[i].ring); in sdma_v7_0_sw_fini()
1380 kfree(adev->sdma.ip_dump); in sdma_v7_0_sw_fini()
1392 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_set_userq_trap_interrupts()
1395 r = amdgpu_irq_get(adev, &adev->sdma.trap_irq, in sdma_v7_0_set_userq_trap_interrupts()
1398 r = amdgpu_irq_put(adev, &adev->sdma.trap_irq, in sdma_v7_0_set_userq_trap_interrupts()
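The hits at 1392-1398 toggle the per-instance trap interrupt for user queues: amdgpu_irq_get() takes a reference that enables the source, amdgpu_irq_put() drops it. A hedged sketch of that reference-counted toggle with mock get/put helpers:

/* userq_irq_sketch.c - hedged sketch of the irq_get/irq_put toggle */
#include <stdbool.h>
#include <stdio.h>

/* stand-in for the reference-counted enable in amdgpu_irq_get/put */
static int irq_refcount[2];

static int irq_get(int instance) { irq_refcount[instance]++; return 0; }
static int irq_put(int instance)
{
	if (irq_refcount[instance] <= 0)
		return -1;	/* unbalanced put */
	irq_refcount[instance]--;
	return 0;
}

/* mirrors sdma_v7_0_set_userq_trap_interrupts(): one get/put per instance */
static int set_userq_trap_interrupts(int num_instances, bool enable)
{
	for (int i = 0; i < num_instances; i++) {
		int r = enable ? irq_get(i) : irq_put(i);
		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	set_userq_trap_interrupts(2, true);
	set_userq_trap_interrupts(2, false);
	printf("refcounts: %d %d\n", irq_refcount[0], irq_refcount[1]);
	return 0;
}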
1449 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_is_idle()
1561 amdgpu_fence_process(&adev->sdma.instance[instances].ring); in sdma_v7_0_process_trap_irq()
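The hit at 1561 is the tail of the trap handler: once the interrupt entry has been decoded to an SDMA instance, fence processing runs on that instance's ring. A hedged sketch of the routing step only; how the instance index is decoded from the IH entry is not visible in this listing, so it is taken as a parameter here:

/* trap_irq_sketch.c - hedged sketch of routing a trap to the right ring */
#include <stdio.h>

struct mock_ring { int me; };
struct mock_adev {
	int num_instances;
	struct mock_ring ring[2];
};

/* stands in for the call at line 1561: amdgpu_fence_process(&...ring) */
static void process_trap_irq(struct mock_adev *adev, int instance)
{
	if (instance >= adev->num_instances)
		return;		/* spurious source: ignore */
	printf("fence_process on sdma ring %d\n", adev->ring[instance].me);
}

int main(void)
{
	struct mock_adev adev = { .num_instances = 2,
				  .ring = { { .me = 0 }, { .me = 1 } } };
	process_trap_irq(&adev, 1);
	return 0;
}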
1624 if (!adev->sdma.ip_dump) in sdma_v7_0_print_ip_state()
1627 drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances); in sdma_v7_0_print_ip_state()
1628 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_print_ip_state()
1634 adev->sdma.ip_dump[instance_offset + j]); in sdma_v7_0_print_ip_state()
1645 if (!adev->sdma.ip_dump) in sdma_v7_0_dump_ip_state()
1649 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_dump_ip_state()
1652 adev->sdma.ip_dump[instance_offset + j] = in sdma_v7_0_dump_ip_state()
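The hits at 1624-1652 are the dump/print pair over the flat ip_dump array allocated at line 1347: dump snapshots every register of every instance at offset instance_offset = i * reg_count, and print walks the identical layout back out. A hedged sketch of that indexing with a fake register reader:

/* ip_dump_sketch.c - hedged sketch of the flat instance_offset indexing */
#include <stdint.h>
#include <stdio.h>

#define REG_COUNT 4		/* stand-in register list size */
#define NUM_INSTANCES 2

static uint32_t read_reg(int instance, int reg)
{
	return (uint32_t)(instance * 100 + reg);	/* fake register value */
}

int main(void)
{
	uint32_t ip_dump[NUM_INSTANCES * REG_COUNT];

	/* dump: one contiguous block of REG_COUNT dwords per instance */
	for (int i = 0; i < NUM_INSTANCES; i++) {
		int instance_offset = i * REG_COUNT;
		for (int j = 0; j < REG_COUNT; j++)
			ip_dump[instance_offset + j] = read_reg(i, j);
	}

	/* print: walk the same layout back out (cf. lines 1627-1634) */
	printf("num_instances:%d\n", NUM_INSTANCES);
	for (int i = 0; i < NUM_INSTANCES; i++) {
		int instance_offset = i * REG_COUNT;
		for (int j = 0; j < REG_COUNT; j++)
			printf("inst%d reg%d = 0x%08x\n", i, j,
			       ip_dump[instance_offset + j]);
	}
	return 0;
}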
1720 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_set_ring_funcs()
1721 adev->sdma.instance[i].ring.funcs = &sdma_v7_0_ring_funcs; in sdma_v7_0_set_ring_funcs()
1722 adev->sdma.instance[i].ring.me = i; in sdma_v7_0_set_ring_funcs()
1741 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + in sdma_v7_0_set_irq_funcs()
1742 adev->sdma.num_instances; in sdma_v7_0_set_irq_funcs()
1743 adev->sdma.trap_irq.funcs = &sdma_v7_0_trap_irq_funcs; in sdma_v7_0_set_irq_funcs()
1744 adev->sdma.fence_irq.funcs = &sdma_v7_0_fence_irq_funcs; in sdma_v7_0_set_irq_funcs()
1745 adev->sdma.illegal_inst_irq.funcs = &sdma_v7_0_illegal_inst_irq_funcs; in sdma_v7_0_set_irq_funcs()
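The hits at 1720-1745 wire static function tables at init: every instance's ring shares one sdma_v7_0_ring_funcs table and records its own index in ring.me, while the trap IRQ source sizes num_types to one type per instance starting at AMDGPU_SDMA_IRQ_INSTANCE0. A hedged sketch with mock tables:

/* funcs_wiring_sketch.c - hedged sketch of the function-table wiring */
#include <stdio.h>

struct mock_ring_funcs { const char *name; };
struct mock_ring { const struct mock_ring_funcs *funcs; int me; };
struct mock_irq_src { int num_types; };

static const struct mock_ring_funcs ring_funcs = { .name = "sdma_v7_0" };

int main(void)
{
	enum { NUM_INSTANCES = 2, IRQ_INSTANCE0 = 0 };
	struct mock_ring ring[NUM_INSTANCES];
	struct mock_irq_src trap_irq;

	/* lines 1720-1722: one shared funcs table, me records the instance */
	for (int i = 0; i < NUM_INSTANCES; i++) {
		ring[i].funcs = &ring_funcs;
		ring[i].me = i;
	}

	/* lines 1741-1742: one trap interrupt type per instance */
	trap_irq.num_types = IRQ_INSTANCE0 + NUM_INSTANCES;
	printf("%s ring %d ready, trap irq types: %d\n",
	       ring[1].funcs->name, ring[1].me, trap_irq.num_types);
	return 0;
}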
1830 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v7_0_set_buffer_funcs()
1845 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_set_vm_pte_funcs()
1847 &adev->sdma.instance[i].ring.sched; in sdma_v7_0_set_vm_pte_funcs()
1849 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; in sdma_v7_0_set_vm_pte_funcs()
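The hits at 1830 and 1845-1849 hand the SDMA rings over to the rest of the driver: instance 0's ring becomes the buffer-move ring for the memory manager, and every instance's GPU scheduler is registered for VM page-table update jobs. A hedged sketch of that hand-off with mock scheduler types:

/* pte_sched_sketch.c - hedged sketch of the scheduler hand-off */
#include <stdio.h>

struct mock_sched { int id; };
struct mock_ring { struct mock_sched sched; };

int main(void)
{
	enum { NUM_INSTANCES = 2 };
	struct mock_ring instance[NUM_INSTANCES] = { { { 0 } }, { { 1 } } };
	struct mock_sched *vm_pte_scheds[NUM_INSTANCES];
	struct mock_ring *buffer_funcs_ring;

	/* line 1830: instance 0 services buffer moves */
	buffer_funcs_ring = &instance[0];

	/* lines 1845-1849: every instance can run VM page-table updates */
	for (int i = 0; i < NUM_INSTANCES; i++)
		vm_pte_scheds[i] = &instance[i].sched;
	int vm_pte_num_scheds = NUM_INSTANCES;

	printf("buffer ring sched id %d, %d pte scheds\n",
	       buffer_funcs_ring->sched.id, vm_pte_num_scheds);
	return 0;
}

Registering all instances for page-table work lets VM updates spread across engines, while funneling buffer moves through a single ring keeps that path simple.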