Lines matching refs: kfd

61 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
63 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
65 static int kfd_resume(struct kfd_node *kfd);
67 static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) in kfd_device_info_set_sdma_info() argument
69 uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0); in kfd_device_info_set_sdma_info()
81 kfd->device_info.num_sdma_queues_per_engine = 2; in kfd_device_info_set_sdma_info()
107 kfd->device_info.num_sdma_queues_per_engine = 8; in kfd_device_info_set_sdma_info()
113 kfd->device_info.num_sdma_queues_per_engine = 8; in kfd_device_info_set_sdma_info()
116 bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES); in kfd_device_info_set_sdma_info()
130 kfd->device_info.num_reserved_sdma_queues_per_engine = 2; in kfd_device_info_set_sdma_info()
132 bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0, in kfd_device_info_set_sdma_info()
133 kfd->adev->sdma.num_instances * in kfd_device_info_set_sdma_info()
134 kfd->device_info.num_reserved_sdma_queues_per_engine); in kfd_device_info_set_sdma_info()
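
The fragments above show the reserved-queue setup: the driver zeroes a KFD_MAX_SDMA_QUEUES-wide bitmap, then, on ASICs that set num_reserved_sdma_queues_per_engine, marks the first num_instances * reserved-per-engine bits as unavailable to user queues. A minimal userspace model of that bitmap logic (sizes and engine counts below are illustrative stand-ins, not the kernel's values):

#include <stdio.h>
#include <string.h>

#define MAX_SDMA_QUEUES 128                 /* stand-in for KFD_MAX_SDMA_QUEUES */
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITMAP_WORDS ((MAX_SDMA_QUEUES + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* models of bitmap_zero()/bitmap_set() from <linux/bitmap.h> */
static void bitmap_zero_model(unsigned long *map, unsigned int nbits)
{
	memset(map, 0, BITMAP_WORDS * sizeof(unsigned long));
	(void)nbits;
}

static void bitmap_set_model(unsigned long *map, unsigned int start, unsigned int len)
{
	for (unsigned int i = start; i < start + len; i++)
		map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

int main(void)
{
	unsigned long reserved[BITMAP_WORDS];
	unsigned int num_instances = 2;          /* assumed engine count */
	unsigned int reserved_per_engine = 2;    /* mirrors num_reserved_sdma_queues_per_engine */

	bitmap_zero_model(reserved, MAX_SDMA_QUEUES);
	/* reserve the first N queues across all engines, as the fragments do */
	bitmap_set_model(reserved, 0, num_instances * reserved_per_engine);

	for (unsigned int i = 0; i < 8; i++)
		printf("queue %u: %s\n", i,
		       (reserved[i / BITS_PER_LONG] >> (i % BITS_PER_LONG)) & 1 ?
		       "reserved" : "free");
	return 0;
}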
141 static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) in kfd_device_info_set_event_interrupt_class() argument
143 uint32_t gc_version = KFD_GC_VERSION(kfd); in kfd_device_info_set_event_interrupt_class()
154 kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; in kfd_device_info_set_event_interrupt_class()
159 kfd->device_info.event_interrupt_class = in kfd_device_info_set_event_interrupt_class()
175 kfd->device_info.event_interrupt_class = &event_interrupt_class_v10; in kfd_device_info_set_event_interrupt_class()
186 kfd->device_info.event_interrupt_class = &event_interrupt_class_v11; in kfd_device_info_set_event_interrupt_class()
191 kfd->device_info.event_interrupt_class = &event_interrupt_class_v11; in kfd_device_info_set_event_interrupt_class()
196 kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; in kfd_device_info_set_event_interrupt_class()
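
kfd_device_info_set_event_interrupt_class picks a per-generation vtable of interrupt handlers by GC IP version, falling back to the v9 class for unrecognized versions (line 196). A sketch of the range-dispatch shape with simplified cut-offs; the driver's actual ladder has more exact-match cases:

#include <stdio.h>
#include <stdint.h>

/* model of the kernel's IP_VERSION(maj, min, rev) packing */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

struct event_interrupt_class {
	const char *name;                 /* the real struct holds function pointers */
};

static const struct event_interrupt_class class_v9  = { "v9"  };
static const struct event_interrupt_class class_v10 = { "v10" };
static const struct event_interrupt_class class_v11 = { "v11" };

/* pick a handler vtable by GC IP version range, mirroring the ladder above */
static const struct event_interrupt_class *
pick_class(uint32_t gc_version)
{
	if (gc_version < IP_VERSION(10, 0, 0))
		return &class_v9;             /* GFX9 family */
	if (gc_version < IP_VERSION(11, 0, 0))
		return &class_v10;            /* GFX10.x */
	return &class_v11;                    /* GFX11 and newer */
}

int main(void)
{
	printf("%s\n", pick_class(IP_VERSION(10, 3, 0))->name); /* prints v10 */
	return 0;
}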
200 static void kfd_device_info_init(struct kfd_dev *kfd, in kfd_device_info_init() argument
203 uint32_t gc_version = KFD_GC_VERSION(kfd); in kfd_device_info_init()
204 uint32_t asic_type = kfd->adev->asic_type; in kfd_device_info_init()
206 kfd->device_info.max_pasid_bits = 16; in kfd_device_info_init()
207 kfd->device_info.max_no_of_hqd = 24; in kfd_device_info_init()
208 kfd->device_info.num_of_watch_points = 4; in kfd_device_info_init()
209 kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED; in kfd_device_info_init()
210 kfd->device_info.gfx_target_version = gfx_target_version; in kfd_device_info_init()
212 if (KFD_IS_SOC15(kfd)) { in kfd_device_info_init()
213 kfd->device_info.doorbell_size = 8; in kfd_device_info_init()
214 kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t); in kfd_device_info_init()
215 kfd->device_info.supports_cwsr = true; in kfd_device_info_init()
217 kfd_device_info_set_sdma_info(kfd); in kfd_device_info_init()
219 kfd_device_info_set_event_interrupt_class(kfd); in kfd_device_info_init()
224 kfd->device_info.no_atomic_fw_version = 14; in kfd_device_info_init()
226 kfd->device_info.no_atomic_fw_version = 3; in kfd_device_info_init()
228 kfd->device_info.no_atomic_fw_version = 92; in kfd_device_info_init()
230 kfd->device_info.no_atomic_fw_version = 145; in kfd_device_info_init()
234 kfd->device_info.needs_pci_atomics = true; in kfd_device_info_init()
241 kfd->device_info.needs_pci_atomics = true; in kfd_device_info_init()
242 kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0; in kfd_device_info_init()
244 kfd->device_info.needs_pci_atomics = true; in kfd_device_info_init()
245 kfd->device_info.no_atomic_fw_version = 2090; in kfd_device_info_init()
247 kfd->device_info.needs_pci_atomics = true; in kfd_device_info_init()
250 kfd->device_info.doorbell_size = 4; in kfd_device_info_init()
251 kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t); in kfd_device_info_init()
252 kfd->device_info.event_interrupt_class = &event_interrupt_class_cik; in kfd_device_info_init()
253 kfd->device_info.num_sdma_queues_per_engine = 2; in kfd_device_info_init()
258 kfd->device_info.supports_cwsr = true; in kfd_device_info_init()
261 kfd->device_info.needs_pci_atomics = true; in kfd_device_info_init()
267 struct kfd_dev *kfd = NULL; in kgd2kfd_probe() local
485 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); in kgd2kfd_probe()
486 if (!kfd) in kgd2kfd_probe()
489 kfd->adev = adev; in kgd2kfd_probe()
490 kfd_device_info_init(kfd, vf, gfx_target_version); in kgd2kfd_probe()
491 kfd->init_complete = false; in kgd2kfd_probe()
492 kfd->kfd2kgd = f2g; in kgd2kfd_probe()
493 atomic_set(&kfd->compute_profile, 0); in kgd2kfd_probe()
495 mutex_init(&kfd->doorbell_mutex); in kgd2kfd_probe()
497 ida_init(&kfd->doorbell_ida); in kgd2kfd_probe()
499 return kfd; in kgd2kfd_probe()
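
kgd2kfd_probe is the first half of a two-phase bring-up: it only allocates the struct kfd_dev (kzalloc), fills in device info, and initializes the doorbell mutex and IDA, leaving init_complete false; no hardware is touched until kgd2kfd_device_init. A compact userspace model of that split, assuming stand-in names:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <pthread.h>

/* model of the two-phase bring-up: probe allocates and fills software
   state only; init_complete stays false until device init succeeds */
struct dev_model {
	bool init_complete;
	pthread_mutex_t doorbell_mutex;
};

static struct dev_model *probe_model(void)
{
	struct dev_model *d = calloc(1, sizeof(*d));  /* kzalloc analogue */
	if (!d)
		return NULL;
	pthread_mutex_init(&d->doorbell_mutex, NULL);
	d->init_complete = false;   /* hardware untouched in this phase */
	return d;
}

int main(void)
{
	struct dev_model *d = probe_model();
	if (!d)
		return 1;
	printf("probed, init_complete=%d\n", d->init_complete);
	pthread_mutex_destroy(&d->doorbell_mutex);
	free(d);
	return 0;
}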
502 static void kfd_cwsr_init(struct kfd_dev *kfd) in kfd_cwsr_init() argument
504 if (cwsr_enable && kfd->device_info.supports_cwsr) { in kfd_cwsr_init()
505 if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) { in kfd_cwsr_init()
508 kfd->cwsr_isa = cwsr_trap_gfx8_hex; in kfd_cwsr_init()
509 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); in kfd_cwsr_init()
510 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) { in kfd_cwsr_init()
513 kfd->cwsr_isa = cwsr_trap_arcturus_hex; in kfd_cwsr_init()
514 kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex); in kfd_cwsr_init()
515 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) { in kfd_cwsr_init()
518 kfd->cwsr_isa = cwsr_trap_aldebaran_hex; in kfd_cwsr_init()
519 kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex); in kfd_cwsr_init()
520 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) || in kfd_cwsr_init()
521 KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) { in kfd_cwsr_init()
524 kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex; in kfd_cwsr_init()
525 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex); in kfd_cwsr_init()
526 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 5, 0)) { in kfd_cwsr_init()
528 kfd->cwsr_isa = cwsr_trap_gfx9_5_0_hex; in kfd_cwsr_init()
529 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_5_0_hex); in kfd_cwsr_init()
530 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) { in kfd_cwsr_init()
533 kfd->cwsr_isa = cwsr_trap_gfx9_hex; in kfd_cwsr_init()
534 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex); in kfd_cwsr_init()
535 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) { in kfd_cwsr_init()
538 kfd->cwsr_isa = cwsr_trap_nv1x_hex; in kfd_cwsr_init()
539 kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex); in kfd_cwsr_init()
540 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) { in kfd_cwsr_init()
543 kfd->cwsr_isa = cwsr_trap_gfx10_hex; in kfd_cwsr_init()
544 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex); in kfd_cwsr_init()
545 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(12, 0, 0)) { in kfd_cwsr_init()
549 kfd->cwsr_isa = cwsr_trap_gfx11_hex; in kfd_cwsr_init()
550 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex); in kfd_cwsr_init()
554 kfd->cwsr_isa = cwsr_trap_gfx12_hex; in kfd_cwsr_init()
555 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex); in kfd_cwsr_init()
558 kfd->cwsr_enabled = true; in kfd_cwsr_init()
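
kfd_cwsr_init resolves the compute-wave save/restore (CWSR) trap handler by walking IP versions, from exact matches (Arcturus, Aldebaran, GFX 9.4.3/9.4.4, 9.5.0) to open ranges, and points cwsr_isa/cwsr_isa_size at the matching hex blob. A table-driven model of the range part only; blob contents and cut-offs below are placeholders:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

/* placeholder blobs standing in for the cwsr_trap_*_hex arrays */
static const uint32_t trap_gfx9[]  = { 0xdeadbeef };
static const uint32_t trap_gfx10[] = { 0xcafef00d };
static const uint32_t trap_gfx11[] = { 0xfeedface };

struct cwsr_entry {
	uint32_t max_version_excl;   /* first version NOT covered by this blob */
	const uint32_t *isa;
	size_t isa_size;
};

/* ordered low-to-high, mirroring the if/else ladder above */
static const struct cwsr_entry cwsr_table[] = {
	{ IP_VERSION(10, 1, 1), trap_gfx9,  sizeof(trap_gfx9)  },
	{ IP_VERSION(11, 0, 0), trap_gfx10, sizeof(trap_gfx10) },
	{ IP_VERSION(12, 0, 0), trap_gfx11, sizeof(trap_gfx11) },
};

static const struct cwsr_entry *pick_cwsr(uint32_t gc_version)
{
	for (size_t i = 0; i < sizeof(cwsr_table) / sizeof(cwsr_table[0]); i++)
		if (gc_version < cwsr_table[i].max_version_excl)
			return &cwsr_table[i];
	return NULL;
}

int main(void)
{
	const struct cwsr_entry *e = pick_cwsr(IP_VERSION(10, 3, 2));
	if (e)
		printf("blob size %zu bytes\n", e->isa_size);
	return 0;
}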
565 struct kfd_dev *kfd = node->kfd; in kfd_gws_init() local
573 && kfd->mec2_fw_version >= 0x81b3) || in kfd_gws_init()
575 && kfd->mec2_fw_version >= 0x1b3) || in kfd_gws_init()
577 && kfd->mec2_fw_version >= 0x30) || in kfd_gws_init()
579 && kfd->mec2_fw_version >= 0x28) || in kfd_gws_init()
585 && kfd->mec2_fw_version >= 0x6b) || in kfd_gws_init()
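
kfd_gws_init gates GWS (global wave sync) support on a minimum MEC2 firmware build per GC IP version; the fragments above show the firmware thresholds (0x81b3, 0x1b3, 0x30, 0x28, 0x6b). A sketch of that gating as a lookup table; the version-to-threshold pairings below are hypothetical, only the shape matches:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

struct fw_gate {
	uint32_t gc_version;   /* IP version this entry applies to (hypothetical) */
	uint32_t min_mec2_fw;  /* first firmware build with GWS support */
};

/* illustrative pairings in the spirit of the checks above */
static const struct fw_gate gws_gates[] = {
	{ IP_VERSION(9, 0, 1), 0x81b3 },
	{ IP_VERSION(9, 4, 1), 0x1b3  },
	{ IP_VERSION(9, 4, 2), 0x30   },
};

static bool gws_supported(uint32_t gc_version, uint32_t mec2_fw_version)
{
	for (size_t i = 0; i < sizeof(gws_gates) / sizeof(gws_gates[0]); i++)
		if (gws_gates[i].gc_version == gc_version)
			return mec2_fw_version >= gws_gates[i].min_mec2_fw;
	return false;   /* unknown ASIC: be conservative */
}

int main(void)
{
	printf("%d\n", gws_supported(IP_VERSION(9, 4, 1), 0x1b4)); /* prints 1 */
	return 0;
}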
653 static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes) in kfd_cleanup_nodes() argument
663 flush_workqueue(kfd->ih_wq); in kfd_cleanup_nodes()
664 destroy_workqueue(kfd->ih_wq); in kfd_cleanup_nodes()
667 knode = kfd->nodes[i]; in kfd_cleanup_nodes()
674 kfd->nodes[i] = NULL; in kfd_cleanup_nodes()
713 bool kgd2kfd_device_init(struct kfd_dev *kfd, in kgd2kfd_device_init() argument
723 kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, in kgd2kfd_device_init()
725 kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, in kgd2kfd_device_init()
727 kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, in kgd2kfd_device_init()
729 kfd->shared_resources = *gpu_resources; in kgd2kfd_device_init()
731 kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr); in kgd2kfd_device_init()
733 if (kfd->num_nodes == 0) { in kgd2kfd_device_init()
736 kfd->adev->gfx.num_xcc_per_xcp); in kgd2kfd_device_init()
744 kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev); in kgd2kfd_device_init()
745 if (!kfd->pci_atomic_requested && in kgd2kfd_device_init()
746 kfd->device_info.needs_pci_atomics && in kgd2kfd_device_init()
747 (!kfd->device_info.no_atomic_fw_version || in kgd2kfd_device_init()
748 kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) { in kgd2kfd_device_init()
751 kfd->adev->pdev->vendor, kfd->adev->pdev->device, in kgd2kfd_device_init()
752 kfd->mec_fw_version, in kgd2kfd_device_init()
753 kfd->device_info.no_atomic_fw_version); in kgd2kfd_device_init()
770 if (kfd->adev->xcp_mgr) { in kgd2kfd_device_init()
771 partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr, in kgd2kfd_device_init()
774 kfd->num_nodes != 1) { in kgd2kfd_device_init()
788 kfd->device_info.mqd_size_aligned; in kgd2kfd_device_init()
794 map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ? in kgd2kfd_device_init()
808 kfd->adev, size, &kfd->gtt_mem, in kgd2kfd_device_init()
809 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, in kgd2kfd_device_init()
818 if (kfd_gtt_sa_init(kfd, size, 512) != 0) { in kgd2kfd_device_init()
823 if (kfd_doorbell_init(kfd)) { in kgd2kfd_device_init()
830 kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; in kgd2kfd_device_init()
838 if (!kfd->hive_id && kfd->num_nodes > 1) in kgd2kfd_device_init()
839 kfd->hive_id = pci_dev_id(kfd->adev->pdev); in kgd2kfd_device_init()
841 kfd->noretry = kfd->adev->gmc.noretry; in kgd2kfd_device_init()
843 kfd_cwsr_init(kfd); in kgd2kfd_device_init()
846 kfd->num_nodes); in kgd2kfd_device_init()
849 for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) { in kgd2kfd_device_init()
855 node->adev = kfd->adev; in kgd2kfd_device_init()
856 node->kfd = kfd; in kgd2kfd_device_init()
857 node->kfd2kgd = kfd->kfd2kgd; in kgd2kfd_device_init()
859 node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx); in kgd2kfd_device_init()
867 (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1; in kgd2kfd_device_init()
877 kfd->num_nodes != 1) { in kgd2kfd_device_init()
901 amdgpu_amdkfd_get_local_mem_info(kfd->adev, in kgd2kfd_device_init()
904 if (kfd->adev->xcp_mgr) in kgd2kfd_device_init()
915 kfd->nodes[i] = node; in kgd2kfd_device_init()
918 svm_range_set_max_pages(kfd->adev); in kgd2kfd_device_init()
920 kfd->init_complete = true; in kgd2kfd_device_init()
921 dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, in kgd2kfd_device_init()
922 kfd->adev->pdev->device); in kgd2kfd_device_init()
931 kfd_cleanup_nodes(kfd, i); in kgd2kfd_device_init()
932 kfd_doorbell_fini(kfd); in kgd2kfd_device_init()
934 kfd_gtt_sa_fini(kfd); in kgd2kfd_device_init()
936 amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem); in kgd2kfd_device_init()
940 kfd->adev->pdev->vendor, kfd->adev->pdev->device); in kgd2kfd_device_init()
942 return kfd->init_complete; in kgd2kfd_device_init()
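
kgd2kfd_device_init tears down in strict reverse order on failure: the error path at lines 931-936 unwinds nodes, doorbells, the GTT sub-allocator, and finally the GTT buffer itself. A compact model of that goto-based staged cleanup, with stand-in stage names and a simulated doorbell failure:

#include <stdio.h>
#include <stdbool.h>

/* stand-ins for the real setup/teardown stages */
static bool gtt_alloc(void)     { return true; }
static void gtt_free(void)      { puts("free gtt"); }
static bool sa_init(void)       { return true; }
static void sa_fini(void)       { puts("fini sub-allocator"); }
static bool doorbell_init(void) { return false; /* simulate failure */ }

static bool device_init_model(void)
{
	if (!gtt_alloc())
		goto out;
	if (!sa_init())
		goto free_gtt;
	if (!doorbell_init())
		goto fini_sa;
	return true;                 /* every stage succeeded */

	/* unwind in strict reverse order of setup */
fini_sa:
	sa_fini();
free_gtt:
	gtt_free();
out:
	return false;
}

int main(void)
{
	printf("init %s\n", device_init_model() ? "ok" : "failed");
	return 0;
}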
945 void kgd2kfd_device_exit(struct kfd_dev *kfd) in kgd2kfd_device_exit() argument
947 if (kfd->init_complete) { in kgd2kfd_device_exit()
949 kfd_cleanup_nodes(kfd, kfd->num_nodes); in kgd2kfd_device_exit()
951 kfd_doorbell_fini(kfd); in kgd2kfd_device_exit()
952 ida_destroy(&kfd->doorbell_ida); in kgd2kfd_device_exit()
953 kfd_gtt_sa_fini(kfd); in kgd2kfd_device_exit()
954 amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem); in kgd2kfd_device_exit()
957 kfree(kfd); in kgd2kfd_device_exit()
960 int kgd2kfd_pre_reset(struct kfd_dev *kfd, in kgd2kfd_pre_reset() argument
966 if (!kfd->init_complete) in kgd2kfd_pre_reset()
969 for (i = 0; i < kfd->num_nodes; i++) { in kgd2kfd_pre_reset()
970 node = kfd->nodes[i]; in kgd2kfd_pre_reset()
974 kgd2kfd_suspend(kfd, true); in kgd2kfd_pre_reset()
976 for (i = 0; i < kfd->num_nodes; i++) in kgd2kfd_pre_reset()
977 kfd_signal_reset_event(kfd->nodes[i]); in kgd2kfd_pre_reset()
988 int kgd2kfd_post_reset(struct kfd_dev *kfd) in kgd2kfd_post_reset() argument
994 if (!kfd->init_complete) in kgd2kfd_post_reset()
997 for (i = 0; i < kfd->num_nodes; i++) { in kgd2kfd_post_reset()
998 ret = kfd_resume(kfd->nodes[i]); in kgd2kfd_post_reset()
1007 for (i = 0; i < kfd->num_nodes; i++) { in kgd2kfd_post_reset()
1008 node = kfd->nodes[i]; in kgd2kfd_post_reset()
1016 bool kfd_is_locked(struct kfd_dev *kfd) in kfd_is_locked() argument
1027 if (kfd) in kfd_is_locked()
1028 return kfd->kfd_dev_lock > 0; in kfd_is_locked()
1035 if (dev->kfd->kfd_dev_lock > 0) in kfd_is_locked()
1042 void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc) in kgd2kfd_suspend() argument
1047 if (!kfd->init_complete) in kgd2kfd_suspend()
1051 kgd2kfd_suspend_process(kfd); in kgd2kfd_suspend()
1053 for (i = 0; i < kfd->num_nodes; i++) { in kgd2kfd_suspend()
1054 node = kfd->nodes[i]; in kgd2kfd_suspend()
1059 int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc) in kgd2kfd_resume() argument
1063 if (!kfd->init_complete) in kgd2kfd_resume()
1066 for (i = 0; i < kfd->num_nodes; i++) { in kgd2kfd_resume()
1067 ret = kfd_resume(kfd->nodes[i]); in kgd2kfd_resume()
1073 ret = kgd2kfd_resume_process(kfd); in kgd2kfd_resume()
1078 void kgd2kfd_suspend_process(struct kfd_dev *kfd) in kgd2kfd_suspend_process() argument
1080 if (!kfd->init_complete) in kgd2kfd_suspend_process()
1090 int kgd2kfd_resume_process(struct kfd_dev *kfd) in kgd2kfd_resume_process() argument
1094 if (!kfd->init_complete) in kgd2kfd_resume_process()
1120 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) in kgd2kfd_interrupt() argument
1127 if (!kfd->init_complete) in kgd2kfd_interrupt()
1130 if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) { in kgd2kfd_interrupt()
1135 for (i = 0; i < kfd->num_nodes; i++) { in kgd2kfd_interrupt()
1136 node = kfd->nodes[i]; in kgd2kfd_interrupt()
1144 queue_work(node->kfd->ih_wq, &node->interrupt_work); in kgd2kfd_interrupt()
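
kgd2kfd_interrupt runs in the interrupt path, so it only size-checks the IH ring entry against a fixed-size buffer (line 1130), lets each node filter whether the entry concerns it, and defers real handling to interrupt_work on kfd->ih_wq (line 1144). A single-threaded userspace model of that top-half/bottom-half split, with the filter elided:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define IH_ENTRY_DWORDS 8          /* mirrors the 8 * sizeof(uint32_t) SOC15 entries */

struct node_model {
	int id;
	uint32_t pending[IH_ENTRY_DWORDS];
	int has_work;
};

/* top half: cheap filter + fixed-size copy, defer the real handling */
static void interrupt_model(struct node_model *node, const uint32_t *ih_entry)
{
	/* the driver first asks the per-ASIC handler whether this entry
	   is interesting before copying (filtering elided here) */
	memcpy(node->pending, ih_entry, sizeof(node->pending));
	node->has_work = 1;            /* stands in for queue_work() */
}

/* bottom half: runs later, outside the interrupt path */
static void interrupt_work_model(struct node_model *node)
{
	if (!node->has_work)
		return;
	printf("node %d handling source id 0x%x\n", node->id, node->pending[0]);
	node->has_work = 0;
}

int main(void)
{
	struct node_model n = { .id = 0 };
	uint32_t entry[IH_ENTRY_DWORDS] = { 0x92 /* hypothetical source id */ };

	interrupt_model(&n, entry);
	interrupt_work_model(&n);
	return 0;
}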
1242 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, in kfd_gtt_sa_init() argument
1252 kfd->gtt_sa_chunk_size = chunk_size; in kfd_gtt_sa_init()
1253 kfd->gtt_sa_num_of_chunks = buf_size / chunk_size; in kfd_gtt_sa_init()
1255 kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks, in kfd_gtt_sa_init()
1257 if (!kfd->gtt_sa_bitmap) in kfd_gtt_sa_init()
1261 kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap); in kfd_gtt_sa_init()
1263 mutex_init(&kfd->gtt_sa_lock); in kfd_gtt_sa_init()
1268 static void kfd_gtt_sa_fini(struct kfd_dev *kfd) in kfd_gtt_sa_fini() argument
1270 mutex_destroy(&kfd->gtt_sa_lock); in kfd_gtt_sa_fini()
1271 bitmap_free(kfd->gtt_sa_bitmap); in kfd_gtt_sa_fini()
1292 struct kfd_dev *kfd = node->kfd; in kfd_gtt_sa_allocate() local
1297 if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size) in kfd_gtt_sa_allocate()
1308 mutex_lock(&kfd->gtt_sa_lock); in kfd_gtt_sa_allocate()
1312 found = find_next_zero_bit(kfd->gtt_sa_bitmap, in kfd_gtt_sa_allocate()
1313 kfd->gtt_sa_num_of_chunks, in kfd_gtt_sa_allocate()
1319 if (found == kfd->gtt_sa_num_of_chunks) in kfd_gtt_sa_allocate()
1326 kfd->gtt_start_gpu_addr, in kfd_gtt_sa_allocate()
1328 kfd->gtt_sa_chunk_size); in kfd_gtt_sa_allocate()
1330 kfd->gtt_start_cpu_ptr, in kfd_gtt_sa_allocate()
1332 kfd->gtt_sa_chunk_size); in kfd_gtt_sa_allocate()
1338 if (size <= kfd->gtt_sa_chunk_size) { in kfd_gtt_sa_allocate()
1340 __set_bit(found, kfd->gtt_sa_bitmap); in kfd_gtt_sa_allocate()
1345 cur_size = size - kfd->gtt_sa_chunk_size; in kfd_gtt_sa_allocate()
1348 find_next_zero_bit(kfd->gtt_sa_bitmap, in kfd_gtt_sa_allocate()
1349 kfd->gtt_sa_num_of_chunks, ++found); in kfd_gtt_sa_allocate()
1363 if (found == kfd->gtt_sa_num_of_chunks) in kfd_gtt_sa_allocate()
1367 if (cur_size <= kfd->gtt_sa_chunk_size) in kfd_gtt_sa_allocate()
1370 cur_size -= kfd->gtt_sa_chunk_size; in kfd_gtt_sa_allocate()
1378 bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start, in kfd_gtt_sa_allocate()
1382 mutex_unlock(&kfd->gtt_sa_lock); in kfd_gtt_sa_allocate()
1387 mutex_unlock(&kfd->gtt_sa_lock); in kfd_gtt_sa_allocate()
1394 struct kfd_dev *kfd = node->kfd; in kfd_gtt_sa_free() local
1403 mutex_lock(&kfd->gtt_sa_lock); in kfd_gtt_sa_free()
1406 bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start, in kfd_gtt_sa_free()
1409 mutex_unlock(&kfd->gtt_sa_lock); in kfd_gtt_sa_free()
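
Together, kfd_gtt_sa_init/allocate/free implement a chunk-granular sub-allocator over the GTT buffer: init divides the buffer into fixed-size chunks backed by one bitmap bit each; allocate scans for a run of contiguous free chunks large enough for the request (the single-chunk fast path at line 1338 short-circuits the scan) and sets the bits; free clears them. A self-contained userspace model, using a byte-per-chunk map in place of the kernel's bitmap helpers and omitting the lock and address computation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sa {
	unsigned int chunk_size;
	unsigned int num_chunks;
	unsigned char *bitmap;       /* one byte per chunk, for simplicity */
};

static int sa_init(struct sa *sa, unsigned int buf_size, unsigned int chunk_size)
{
	sa->chunk_size = chunk_size;
	sa->num_chunks = buf_size / chunk_size;
	sa->bitmap = calloc(sa->num_chunks, 1);
	return sa->bitmap ? 0 : -1;
}

/* returns the first chunk of a contiguous run covering `size` bytes, or -1 */
static int sa_alloc(struct sa *sa, unsigned int size)
{
	unsigned int need = (size + sa->chunk_size - 1) / sa->chunk_size;

	for (unsigned int start = 0; start + need <= sa->num_chunks; start++) {
		unsigned int i;

		for (i = 0; i < need && !sa->bitmap[start + i]; i++)
			;
		if (i == need) {                 /* found a free run */
			memset(sa->bitmap + start, 1, need);
			return (int)start;
		}
		start += i;                      /* skip past the used chunk */
	}
	return -1;
}

static void sa_free(struct sa *sa, int start, unsigned int size)
{
	unsigned int need = (size + sa->chunk_size - 1) / sa->chunk_size;

	memset(sa->bitmap + start, 0, need);
}

int main(void)
{
	struct sa sa;

	if (sa_init(&sa, 4096, 512))              /* 8 chunks of 512 bytes */
		return 1;
	int a = sa_alloc(&sa, 1000);              /* takes chunks 0-1 */
	int b = sa_alloc(&sa, 200);               /* takes chunk 2 */
	printf("a=%d b=%d\n", a, b);
	sa_free(&sa, a, 1000);
	printf("re-alloc=%d\n", sa_alloc(&sa, 300)); /* reuses chunk 0 */
	free(sa.bitmap);
	return 0;
}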
1415 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) in kgd2kfd_set_sram_ecc_flag() argument
1422 if (kfd) in kgd2kfd_set_sram_ecc_flag()
1423 atomic_inc(&kfd->nodes[0]->sram_ecc_flag); in kgd2kfd_set_sram_ecc_flag()
1428 if (atomic_inc_return(&node->kfd->compute_profile) == 1) in kfd_inc_compute_active()
1434 int count = atomic_dec_return(&node->kfd->compute_profile); in kfd_dec_compute_active()
1443 if (atomic_read(&node->kfd->compute_profile)) in kfd_compute_active()
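
compute_profile is a plain activity refcount: the 0-to-1 transition in kfd_inc_compute_active switches the ASIC into the compute power profile, and the 1-to-0 transition in kfd_dec_compute_active switches it back. A userspace model with C11 atomics (the kernel uses atomic_inc_return/atomic_dec_return):

#include <stdio.h>
#include <stdatomic.h>

static atomic_int compute_profile;

/* first active user flips the profile on; last one flips it off */
static void inc_compute_active(void)
{
	/* atomic_fetch_add returns the old value, so +1 is the new count */
	if (atomic_fetch_add(&compute_profile, 1) + 1 == 1)
		puts("enter compute profile");
}

static void dec_compute_active(void)
{
	int count = atomic_fetch_sub(&compute_profile, 1) - 1;

	if (count == 0)
		puts("leave compute profile");
}

int main(void)
{
	inc_compute_active();   /* enter */
	inc_compute_active();   /* no change */
	dec_compute_active();   /* no change */
	dec_compute_active();   /* leave */
	return 0;
}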
1448 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) in kgd2kfd_smi_event_throttle() argument
1455 if (kfd && kfd->init_complete) in kgd2kfd_smi_event_throttle()
1456 kfd_smi_event_update_thermal_throttling(kfd->nodes[0], in kgd2kfd_smi_event_throttle()
1469 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes; in kfd_get_num_sdma_engines()
1471 return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2); in kfd_get_num_sdma_engines()
1477 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes - in kfd_get_num_xgmi_sdma_engines()
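
The engine counts are straight division of the physical SDMA instances across partition nodes: non-XGMI engines are capped at 2 per node on XGMI-capable parts (line 1471), and whatever remains per node is handed to XGMI SDMA (line 1477). A worked example with hypothetical counts:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	/* hypothetical XGMI-capable part: 8 SDMA instances, 1 KFD node */
	int num_instances = 8, num_nodes = 1;

	int sdma = min_int(num_instances / num_nodes, 2);   /* -> 2 */
	int xgmi_sdma = num_instances / num_nodes - sdma;   /* -> 6 */

	printf("per node: %d SDMA, %d XGMI SDMA\n", sdma, xgmi_sdma);
	return 0;
}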
1481 int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd) in kgd2kfd_check_and_lock_kfd() argument
1488 if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd)) in kgd2kfd_check_and_lock_kfd()
1492 if (kfd_is_locked(kfd)) { in kgd2kfd_check_and_lock_kfd()
1506 if (p->pdds[i]->dev->kfd != kfd) in kgd2kfd_check_and_lock_kfd()
1518 ++kfd->kfd_dev_lock; in kgd2kfd_check_and_lock_kfd()
1524 void kgd2kfd_unlock_kfd(struct kfd_dev *kfd) in kgd2kfd_unlock_kfd() argument
1527 --kfd->kfd_dev_lock; in kgd2kfd_unlock_kfd()
1531 int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id) in kgd2kfd_start_sched() argument
1536 if (!kfd->init_complete) in kgd2kfd_start_sched()
1539 if (node_id >= kfd->num_nodes) { in kgd2kfd_start_sched()
1540 dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n", in kgd2kfd_start_sched()
1541 node_id, kfd->num_nodes - 1); in kgd2kfd_start_sched()
1544 node = kfd->nodes[node_id]; in kgd2kfd_start_sched()
1553 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) in kgd2kfd_stop_sched() argument
1557 if (!kfd->init_complete) in kgd2kfd_stop_sched()
1560 if (node_id >= kfd->num_nodes) { in kgd2kfd_stop_sched()
1561 dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n", in kgd2kfd_stop_sched()
1562 node_id, kfd->num_nodes - 1); in kgd2kfd_stop_sched()
1566 node = kfd->nodes[node_id]; in kgd2kfd_stop_sched()
1570 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id) in kgd2kfd_compute_active() argument
1574 if (!kfd->init_complete) in kgd2kfd_compute_active()
1577 if (node_id >= kfd->num_nodes) { in kgd2kfd_compute_active()
1578 dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n", in kgd2kfd_compute_active()
1579 node_id, kfd->num_nodes - 1); in kgd2kfd_compute_active()
1583 node = kfd->nodes[node_id]; in kgd2kfd_compute_active()
1667 if (dev->kfd->shared_resources.enable_mes) { in kfd_debugfs_hang_hws()