
Searched refs:KFD_GC_VERSION (Results 1 – 17 of 17) sorted by relevance

/drivers/gpu/drm/amd/amdkfd/
kfd_debug.h
80 KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) || in kfd_dbg_is_per_vmid_supported()
81 KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) || in kfd_dbg_is_per_vmid_supported()
82 KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0) || in kfd_dbg_is_per_vmid_supported()
83 KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0)); in kfd_dbg_is_per_vmid_supported()
103 KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1)); in kfd_dbg_is_rlc_restore_supported()
114 if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1) in kfd_dbg_has_gws_support()
116 (KFD_GC_VERSION(dev) >= IP_VERSION(9, 1, 0) in kfd_dbg_has_gws_support()
117 && KFD_GC_VERSION(dev) <= IP_VERSION(9, 2, 2) in kfd_dbg_has_gws_support()
119 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0) in kfd_dbg_has_gws_support()
121 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) in kfd_dbg_has_gws_support()
[all …]
kfd_device.c
143 uint32_t gc_version = KFD_GC_VERSION(kfd); in kfd_device_info_set_event_interrupt_class()
203 uint32_t gc_version = KFD_GC_VERSION(kfd); in kfd_device_info_init()
572 ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1) in kfd_gws_init()
574 (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0) in kfd_gws_init()
576 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1) in kfd_gws_init()
578 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2) in kfd_gws_init()
580 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3) || in kfd_gws_init()
583 (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) in kfd_gws_init()
584 && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0) in kfd_gws_init()
586 (KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0) in kfd_gws_init()
[all …]
kfd_device_queue_manager_v9.c
73 if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) || in set_cache_memory_policy_v9()
74 KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4)) in set_cache_memory_policy_v9()
77 if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0)) { in set_cache_memory_policy_v9()
106 if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) || in update_qpd_v9()
107 KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4)) in update_qpd_v9()
kfd_mqd_manager_v9.c
80 if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) && in update_cu_mask()
81 KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) && in update_cu_mask()
82 KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0)) { in update_cu_mask()
307 if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) && in update_mqd()
309 KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0)) in update_mqd()
954 if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) || in mqd_manager_init_v9()
955 KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) || in mqd_manager_init_v9()
956 KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) { in mqd_manager_init_v9()
985 if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) || in mqd_manager_init_v9()
986 KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) || in mqd_manager_init_v9()
[all …]
kfd_packet_manager.c
295 if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) || in pm_init()
296 KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3) || in pm_init()
297 KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4) || in pm_init()
298 KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 5, 0)) in pm_init()
300 else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1)) in pm_init()
455 if (cmd == KFD_DEQUEUE_WAIT_INIT && (KFD_GC_VERSION(pm->dqm->dev) < IP_VERSION(9, 4, 1) || in pm_config_dequeue_wait_counts()
456 KFD_GC_VERSION(pm->dqm->dev) >= IP_VERSION(10, 0, 0))) in pm_config_dequeue_wait_counts()
kfd_priv.h
205 #define KFD_GC_VERSION(dev) (amdgpu_ip_version((dev)->adev, GC_HWIP, 0)) macro
206 #define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
208 ((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
209 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) || \
210 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) || \
211 (KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)))
1157 if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) && in kfd_node_by_irq_ids()
1158 KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) && in kfd_node_by_irq_ids()
1159 KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0)) in kfd_node_by_irq_ids()
1534 return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) || in kfd_flush_tlb_after_unmap()
[all …]
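
Note: the kfd_priv.h entry above is the definition site. KFD_GC_VERSION(dev) expands to amdgpu_ip_version((dev)->adev, GC_HWIP, 0), which yields the graphics-core IP version as a single packed integer, so every call site in this list can gate features with plain ordered comparisons against IP_VERSION(major, minor, rev). Below is a minimal stand-alone sketch of that comparison idiom; the field widths in this local IP_VERSION macro are an assumption for illustration, not copied from the amdgpu headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed packing: major/minor/revision folded into one integer so that
 * ordered comparisons such as ">= IP_VERSION(9, 0, 1)" behave as expected.
 * The real amdgpu macro may use different field widths; this is a sketch. */
#define IP_VERSION(maj, min, rev) \
	(((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | (uint32_t)(rev))

int main(void)
{
	uint32_t gc = IP_VERSION(9, 4, 3);	/* e.g. a GC 9.4.3 node */

	/* Mirrors checks such as KFD_IS_SOC15() and kfd_flush_tlb_after_unmap(). */
	assert(gc >= IP_VERSION(9, 0, 1));	/* SOC15 or newer */
	assert(gc <  IP_VERSION(10, 0, 0));	/* still a GFX9-family part */

	printf("packed GC version: 0x%06x\n", gc);
	return 0;
}
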
kfd_process_queue_manager.c
137 if ((KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) && in pqm_set_gws()
138 KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) && in pqm_set_gws()
139 KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0)) && in pqm_set_gws()
202 if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) && in pqm_clean_queue_resource()
203 KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) && in pqm_clean_queue_resource()
204 KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 5, 0) && in pqm_clean_queue_resource()
328 if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) || in pqm_create_queue()
329 KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) || in pqm_create_queue()
330 KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) in pqm_create_queue()
651 KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) { in pqm_update_mqd()
kfd_mqd_manager.c
105 bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0); in mqd_symmetrically_map_cu_mask()
133 cu_bitmap_sh_mul = (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0) && in mqd_symmetrically_map_cu_mask()
134 KFD_GC_VERSION(mm->dev) < IP_VERSION(13, 0, 0)) ? 2 : 1; in mqd_symmetrically_map_cu_mask()
kfd_topology.c
513 if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0) && in node_show()
1725 if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3) || in fill_in_l2_l3_pcache()
1726 KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 4) || in fill_in_l2_l3_pcache()
1727 KFD_GC_VERSION(knode) == IP_VERSION(9, 5, 0)) in fill_in_l2_l3_pcache()
1927 KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) { in kfd_topology_set_dbg_firmware_support()
1942 switch (KFD_GC_VERSION(dev->gpu)) { in kfd_topology_set_dbg_firmware_support()
1997 if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { in kfd_topology_set_capabilities()
1999 KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 4)) in kfd_topology_set_capabilities()
2008 if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2)) in kfd_topology_set_capabilities()
2141 if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 0, 1)) in kfd_topology_add_device()
[all …]
kfd_packet_manager_v9.c
345 if (KFD_GC_VERSION(pm->dqm->dev) < IP_VERSION(9, 4, 1) || in pm_config_dequeue_wait_counts_v9()
346 KFD_GC_VERSION(pm->dqm->dev) >= IP_VERSION(10, 0, 0)) in pm_config_dequeue_wait_counts_v9()
350 (KFD_GC_VERSION(pm->dqm->dev) == IP_VERSION(9, 4, 3))) in pm_config_dequeue_wait_counts_v9()
kfd_flat_memory.c
409 if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) in kfd_init_apertures()
kfd_int_process_v9.c
242 switch (KFD_GC_VERSION(dev)) { in context_id_expected()
255 return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 1); in context_id_expected()
kfd_int_process_v10.c
341 KFD_GC_VERSION(dev) == IP_VERSION(10, 3, 0)) || in event_interrupt_wq_v10()
kfd_process.c
1489 if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) in kfd_process_xnack_mode()
2172 if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) || in kfd_process_drain_interrupts()
2173 KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4) || in kfd_process_drain_interrupts()
2174 KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 5, 0)) { in kfd_process_drain_interrupts()
kfd_crat.c
1622 switch (KFD_GC_VERSION(kdev)) { in kfd_get_gpu_cache_info()
2130 (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 3) && in kfd_fill_gpu_direct_io_link_to_cpu()
2133 bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3); in kfd_fill_gpu_direct_io_link_to_cpu()
kfd_device_queue_manager.c
3000 if (KFD_GC_VERSION(dev) >= IP_VERSION(12, 0, 0)) in device_queue_manager_init()
3002 else if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0)) in device_queue_manager_init()
3004 else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) in device_queue_manager_init()
3006 else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) in device_queue_manager_init()
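
The device_queue_manager_init() hits above show the other recurring idiom: walk down from the newest GC generation and pick the first per-ASIC variant whose version floor the device meets. A condensed sketch of that selection order follows, using placeholder labels rather than the actual kfd asic-specific ops:

#include <stdint.h>
#include <stdio.h>

/* Same assumed packing as the sketch above. */
#define IP_VERSION(maj, min, rev) \
	(((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | (uint32_t)(rev))

/* Placeholder labels standing in for the per-ASIC ops tables chosen
 * in device_queue_manager_init(); the strings are illustrative only. */
static const char *pick_dqm_ops(uint32_t gc_version)
{
	if (gc_version >= IP_VERSION(12, 0, 0))
		return "v12 ops";
	else if (gc_version >= IP_VERSION(11, 0, 0))
		return "v11 ops";
	else if (gc_version >= IP_VERSION(10, 1, 1))
		return "v10 ops";
	else if (gc_version >= IP_VERSION(9, 0, 1))
		return "v9 ops";
	return "pre-SOC15 ops";
}

int main(void)
{
	printf("%s\n", pick_dqm_ops(IP_VERSION(10, 3, 0)));	/* prints "v10 ops" */
	return 0;
}
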
kfd_svm.c
1198 uint32_t gc_ip_version = KFD_GC_VERSION(node); in svm_range_get_pte_flags()
