| /drivers/gpu/drm/amd/amdkfd/ |
| kfd_device.c |
|    485  kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);   in kgd2kfd_probe()
|    565  struct kfd_dev *kfd = node->kfd;   in kfd_gws_init() (local)
|    808  kfd->adev, size, &kfd->gtt_mem,   in kgd2kfd_device_init()
|    838  if (!kfd->hive_id && kfd->num_nodes > 1)   in kgd2kfd_device_init()
|    841  kfd->noretry = kfd->adev->gmc.noretry;   in kgd2kfd_device_init()
|    856  node->kfd = kfd;   in kgd2kfd_device_init()
|    949  kfd_cleanup_nodes(kfd, kfd->num_nodes);   in kgd2kfd_device_exit()
|   1292  struct kfd_dev *kfd = node->kfd;   in kfd_gtt_sa_allocate() (local)
|   1394  struct kfd_dev *kfd = node->kfd;   in kfd_gtt_sa_free() (local)
|   1455  if (kfd && kfd->init_complete)   in kgd2kfd_smi_event_throttle()
|   [all …]
|
| kfd_doorbell.c |
|     52  if (!kfd->shared_resources.enable_mes)   in kfd_doorbell_process_slice()
|     76  if (!kfd->doorbell_bitmap) {   in kfd_doorbell_init()
|     82  r = amdgpu_bo_create_kernel(kfd->adev,   in kfd_doorbell_init()
|     86  &kfd->doorbells,   in kfd_doorbell_init()
|     91  bitmap_free(kfd->doorbell_bitmap);   in kfd_doorbell_init()
|    101  bitmap_free(kfd->doorbell_bitmap);   in kfd_doorbell_fini()
|    155  mutex_lock(&kfd->doorbell_mutex);   in kfd_get_kernel_doorbell()
|    159  mutex_unlock(&kfd->doorbell_mutex);   in kfd_get_kernel_doorbell()
|    165  kfd->doorbells,   in kfd_get_kernel_doorbell()
|    185  mutex_lock(&kfd->doorbell_mutex);   in kfd_release_kernel_doorbell()
|   [all …]
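
The kfd_doorbell.c hits sketch the kernel-doorbell lifecycle: kfd_doorbell_init() allocates a backing BO through amdgpu_bo_create_kernel() plus a doorbell_bitmap, and kfd_get_kernel_doorbell()/kfd_release_kernel_doorbell() only touch that bitmap while holding kfd->doorbell_mutex. A minimal sketch of the bitmap-under-mutex idiom follows; the helper names and the find_first_zero_bit() allocator are assumptions for illustration, and the real driver's mapping from slot index to an __iomem doorbell address is omitted.

    /* Sketch only: kfd->doorbell_bitmap and kfd->doorbell_mutex come from the
     * hits above; these helper names and the allocator choice are hypothetical. */
    static int kfd_doorbell_grab_slot(struct kfd_dev *kfd, unsigned int nr_slots)
    {
            unsigned int inx;

            mutex_lock(&kfd->doorbell_mutex);
            inx = find_first_zero_bit(kfd->doorbell_bitmap, nr_slots);
            if (inx < nr_slots)
                    __set_bit(inx, kfd->doorbell_bitmap);
            mutex_unlock(&kfd->doorbell_mutex);

            /* The caller would translate the slot index into an __iomem pointer. */
            return inx < nr_slots ? (int)inx : -ENOSPC;
    }

    static void kfd_doorbell_put_slot(struct kfd_dev *kfd, unsigned int inx)
    {
            mutex_lock(&kfd->doorbell_mutex);
            __clear_bit(inx, kfd->doorbell_bitmap);
            mutex_unlock(&kfd->doorbell_mutex);
    }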
|
| kfd_interrupt.c |
|     58  KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size,   in kfd_interrupt_init()
|     65  if (!node->kfd->ih_wq) {   in kfd_interrupt_init()
|     66  node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,   in kfd_interrupt_init()
|     67  node->kfd->num_nodes);   in kfd_interrupt_init()
|     68  if (unlikely(!node->kfd->ih_wq)) {   in kfd_interrupt_init()
|    131  node->kfd->device_info.ih_ring_entry_size);   in dequeue_ih_ring_entry()
|    132  WARN_ON(count != node->kfd->device_info.ih_ring_entry_size);   in dequeue_ih_ring_entry()
|    133  return count == node->kfd->device_info.ih_ring_entry_size;   in dequeue_ih_ring_entry()
|    143  dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,   in interrupt_wq()
|    151  queue_work(dev->kfd->ih_wq, &dev->interrupt_work);   in interrupt_wq()
|   [all …]
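
Read together, the kfd_interrupt.c hits show a top/bottom-half split: kfd_interrupt_init() sizes a buffer of KFD_IH_NUM_ENTRIES fixed-size IH ring entries and creates one shared high-priority "KFD IH" workqueue, the interrupt path queues dev->interrupt_work on that workqueue, and the worker drains entries and dispatches each one through the per-generation device_info.event_interrupt_class handler. A hedged sketch of the worker side is below; it is a stand-in for the real interrupt_wq() worker, and the kfifo-style buffering behind dequeue_ih_ring_entry() is an assumption, since the hits only show the entry-size bookkeeping and the dispatch call.

    /* Illustrative worker only; dequeue_ih_ring_entry() appears in the hits,
     * but its backing storage is not shown there. */
    static void kfd_ih_bottom_half(struct work_struct *work)
    {
            struct kfd_node *dev = container_of(work, struct kfd_node, interrupt_work);
            uint32_t entry[16];     /* assumed >= device_info.ih_ring_entry_size */

            /* Drain buffered IH entries and hand each one to the handler for
             * this GC generation (e.g. the GFX9 class in kfd_int_process_v9.c). */
            while (dequeue_ih_ring_entry(dev, entry))
                    dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev, entry);
    }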
|
| kfd_device_queue_manager_v9.c |
|     70  if (dqm->dev->kfd->noretry)   in set_cache_memory_policy_v9()
|     73  if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||   in set_cache_memory_policy_v9()
|     74  KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))   in set_cache_memory_policy_v9()
|     77  if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0)) {   in set_cache_memory_policy_v9()
|    103  if (dqm->dev->kfd->noretry)   in update_qpd_v9()
|    106  if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||   in update_qpd_v9()
|    107  KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))   in update_qpd_v9()
|
| kfd_packet_manager_v9.c |
|     37  struct kfd_node *kfd = pm->dqm->dev;   in pm_map_process_v9() (local)
|     40  struct amdgpu_device *adev = kfd->adev;   in pm_map_process_v9()
|     60  packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;   in pm_map_process_v9()
|     94  struct kfd_dev *kfd = pm->dqm->dev->kfd;   in pm_map_process_aldebaran() (local)
|     99  struct amdgpu_device *adev = kfd->adev;   in pm_map_process_aldebaran()
|    121  for (i = 0; i < kfd->device_info.num_of_watch_points; i++)   in pm_map_process_aldebaran()
|    154  struct kfd_node *kfd = pm->dqm->dev;   in pm_runlist_v9() (local)
|    155  struct amdgpu_device *adev = kfd->adev;   in pm_runlist_v9()
|    171  1 : min(pm->dqm->processes_count, kfd->max_proc_per_quantum);   in pm_runlist_v9()
|    262  !pm_use_ext_eng(q->device->kfd))   in pm_map_queues_v9()
|   [all …]
|
| kfd_device_queue_manager.c |
|    498  dev->kfd->device_info.doorbell_size);   in allocate_doorbell()
|    993  (dqm->dev->kfd->cwsr_enabled ?   in update_queue()
|   1172  (dqm->dev->kfd->cwsr_enabled ?   in evict_process_queues_nocpsch()
|   1235  if (!dqm->dev->kfd->shared_resources.enable_mes)   in evict_process_queues_cpsch()
|   1379  if (!dqm->dev->kfd->shared_resources.enable_mes)   in restore_process_queues_cpsch()
|   1804  if (!dqm->dev->kfd->shared_resources.enable_mes)   in unhalt_cpsch()
|   1878  if (!dqm->dev->kfd->shared_resources.enable_mes)   in start_cpsch()
|   1893  if (!dqm->dev->kfd->shared_resources.enable_mes)   in stop_cpsch()
|   1900  if (!dqm->dev->kfd->shared_resources.enable_mes)   in stop_cpsch()
|   1904  if (!dqm->dev->kfd->shared_resources.enable_mes)   in stop_cpsch()
|   [all …]
|
| kfd_debug.h |
|    115  && dev->kfd->mec2_fw_version < 0x81b6) ||   in kfd_dbg_has_gws_support()
|    118  && dev->kfd->mec2_fw_version < 0x1b6) ||   in kfd_dbg_has_gws_support()
|    120  && dev->kfd->mec2_fw_version < 0x1b6) ||   in kfd_dbg_has_gws_support()
|    122  && dev->kfd->mec2_fw_version < 0x30) ||   in kfd_dbg_has_gws_support()
|
| kfd_kernel_queue.c |
|     79  prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off);   in kq_initialize()
|    117  retval = kfd_gtt_sa_allocate(dev, dev->kfd->device_info.doorbell_size,   in kq_initialize()
|    128  memset(kq->wptr_kernel, 0, dev->kfd->device_info.doorbell_size);   in kq_initialize()
|    194  kfd_release_kernel_doorbell(dev->kfd, prop.doorbell_ptr);   in kq_initialize()
|    227  kfd_release_kernel_doorbell(kq->dev->kfd,   in kq_uninitialize()
|    312  if (kq->dev->kfd->device_info.doorbell_size == 8) {   in kq_submit_packet()
|    329  if (kq->dev->kfd->device_info.doorbell_size == 8) {   in kq_rollback_packet()
|
| kfd_priv.h |
|    311  struct kfd_dev *kfd;   (member)
|   1101  size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
|   1102  int kfd_doorbell_init(struct kfd_dev *kfd);
|   1103  void kfd_doorbell_fini(struct kfd_dev *kfd);
|   1106  void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
|   1116  int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
|   1118  void kfd_free_process_doorbells(struct kfd_dev *kfd,
|   1154  struct kfd_dev *dev = adev->kfd.dev;   in kfd_node_by_irq_ids()
|   1480  uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
|   1542  bool kfd_is_locked(struct kfd_dev *kfd);
|   [all …]
|
| kfd_debug.c |
|    438  if (!pdd->dev->kfd->shared_resources.enable_mes) {   in kfd_dbg_trap_clear_dev_address_watch()
|    450  if (!pdd->dev->kfd->shared_resources.enable_mes)   in kfd_dbg_trap_clear_dev_address_watch()
|    472  if (!pdd->dev->kfd->shared_resources.enable_mes) {   in kfd_dbg_trap_set_dev_address_watch()
|    492  if (!pdd->dev->kfd->shared_resources.enable_mes)   in kfd_dbg_trap_set_dev_address_watch()
|    544  if (!pdd->dev->kfd->shared_resources.enable_mes)   in kfd_dbg_trap_set_flags()
|    567  if (!pdd->dev->kfd->shared_resources.enable_mes)   in kfd_dbg_trap_set_flags()
|    629  if (!pdd->dev->kfd->shared_resources.enable_mes)   in kfd_dbg_trap_deactivate()
|    746  if (!pdd->dev->kfd->shared_resources.enable_mes)   in kfd_dbg_trap_activate()
|    880  if (!pdd->dev->kfd->shared_resources.enable_mes)   in kfd_dbg_trap_set_wave_launch_override()
|    912  if (!pdd->dev->kfd->shared_resources.enable_mes)   in kfd_dbg_trap_set_wave_launch_mode()
|   [all …]
|
| kfd_topology.c |
|    523  dev->gpu->kfd->mec_fw_version);   in node_show()
|    531  dev->gpu->kfd->sdma_fw_version);   in node_show()
|   1228  if (!dev->gpu->kfd->pci_atomic_requested ||   in kfd_set_iolink_no_atomics()
|   1589  (dev->gpu->kfd->hive_id &&   in kfd_dev_create_p2p_links()
|   1590  dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id))   in kfd_dev_create_p2p_links()
|   1954  firmware_supported = dev->gpu->kfd->mec_fw_version >= 60;   in kfd_topology_set_dbg_firmware_support()
|   2083  gpu->kfd->device_info.gfx_target_version;   in kfd_topology_add_device()
|   2091  if (gpu->kfd->num_nodes > 1)   in kfd_topology_add_device()
|   2104  gpu->kfd->shared_resources.drm_render_minor;   in kfd_topology_add_device()
|   2106  dev->node_props.hive_id = gpu->kfd->hive_id;   in kfd_topology_add_device()
|   [all …]
|
| kfd_mqd_manager_v11.c |
|    112  if (node->kfd->shared_resources.enable_mes)   in allocate_mqd()
|    135  if (mm->dev->kfd->shared_resources.enable_mes)   in init_mqd()
|    185  if (mm->dev->kfd->cwsr_enabled) {   in init_mqd()
|    273  if (mm->dev->kfd->cwsr_enabled)   in update_mqd()
|    406  if (mm->dev->kfd->shared_resources.enable_mes)   in init_mqd_sdma()
|    557  if (dev->kfd->shared_resources.enable_mes) {   in mqd_manager_init_v11()
|
| kfd_process.c |
|   1075  kfd_free_process_doorbells(pdd->dev->kfd, pdd);   in kfd_process_destroy_pdds()
|   1077  if (pdd->dev->kfd->shared_resources.enable_mes &&   in kfd_process_destroy_pdds()
|   1347  if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)   in kfd_process_init_cwsr_apu()
|   1365  memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);   in kfd_process_init_cwsr_apu()
|   1390  if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)   in kfd_process_device_init_cwsr_dgpu()
|   1403  memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);   in kfd_process_device_init_cwsr_dgpu()
|   1477  if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {   in kfd_process_xnack_mode()
|   1492  if (dev->kfd->noretry)   in kfd_process_xnack_mode()
|   2172  if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) ||   in kfd_process_drain_interrupts()
|   2173  KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4) ||   in kfd_process_drain_interrupts()
|   [all …]
|
| kfd_flat_memory.c |
|    331  pdd->dev->kfd->shared_resources.gpuvm_size - 1;   in kfd_init_apertures_vi()
|    350  pdd->dev->kfd->shared_resources.gpuvm_size - 1;   in kfd_init_apertures_v9()
|
| kfd_process_queue_manager.c |
|     94  if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&   in kfd_process_dequeue_from_device()
|    140  !dev->kfd->shared_resources.enable_mes) {   in pqm_set_gws()
|    205  !dev->kfd->shared_resources.enable_mes)   in pqm_clean_queue_resource()
|    211  if (dev->kfd->shared_resources.enable_mes) {   in pqm_clean_queue_resource()
|    266  if (dev->kfd->shared_resources.enable_mes) {   in init_user_queue()
|    349  max_queues = dev->kfd->device_info.max_no_of_hqd/2;   in pqm_create_queue()
|    369  if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {   in pqm_create_queue()
|    473  pdd->dev->kfd->device_info.doorbell_size);   in pqm_create_queue()
|
| kfd_mqd_manager_vi.c |
|     79  static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,   in allocate_mqd() (argument)
|     84  if (kfd_gtt_sa_allocate(kfd, sizeof(struct vi_mqd),   in allocate_mqd()
|    138  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {   in init_mqd()
|    229  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)   in __update_mqd()
|
| kfd_mqd_manager_v10.c |
|     76  static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,   in allocate_mqd() (argument)
|     81  if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),   in allocate_mqd()
|    131  if (mm->dev->kfd->cwsr_enabled) {   in init_mqd()
|    220  if (mm->dev->kfd->cwsr_enabled)   in update_mqd()
|
| kfd_crat.c |
|   1644  kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,   in kfd_get_gpu_cache_info()
|   1716  kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd,   in kfd_get_gpu_cache_info()
|   2187  bool use_ta_info = kdev->kfd->num_nodes == 1;   in kfd_fill_gpu_xgmi_link_to_gpu()
|   2213  bool is_single_hop = kdev->kfd == peer_kdev->kfd;   in kfd_fill_gpu_xgmi_link_to_gpu()
|   2282  (cu_info->number / kdev->kfd->num_nodes);   in kfd_create_vcrat_image_gpu()
|   2357  if (kdev->kfd->hive_id) {   in kfd_create_vcrat_image_gpu()
|   2362  if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)   in kfd_create_vcrat_image_gpu()
|
| kfd_mqd_manager.c |
|     75  dev->kfd->device_info.num_sdma_queues_per_engine +   in allocate_sdma_mqd()
|    112  cu_active_per_node = cu_info->number / mm->dev->kfd->num_nodes;   in mqd_symmetrically_map_cu_mask()
|
| kfd_packet_manager_vi.c |
|     81  struct kfd_node *kfd = pm->dqm->dev;   in pm_runlist_vi() (local)
|     96  kfd->max_proc_per_quantum);   in pm_runlist_vi()
|
| kfd_mqd_manager_v9.c |
|     44  if (mm->dev->kfd->cwsr_enabled &&   in mqd_stride_v9()
|    134  if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {   in allocate_mqd()
|    212  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {   in init_mqd()
|    304  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)   in update_mqd()
|    701  if (mm->dev->kfd->cwsr_enabled &&   in init_mqd_v9_4_3()
|
| kfd_int_process_v9.c |
|    308  dev->kfd->device_info.ih_ring_entry_size);   in event_interrupt_isr_v9()
|    338  if (context_id == 0 && context_id_expected(dev->kfd))   in event_interrupt_isr_v9()
|
| /drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_amdkfd.h |
|    413  bool kgd2kfd_device_init(struct kfd_dev *kfd,
|    415  void kgd2kfd_device_exit(struct kfd_dev *kfd);
|    418  void kgd2kfd_suspend_process(struct kfd_dev *kfd);
|    419  int kgd2kfd_resume_process(struct kfd_dev *kfd);
|    420  int kgd2kfd_pre_reset(struct kfd_dev *kfd,
|    422  int kgd2kfd_post_reset(struct kfd_dev *kfd);
|    424  void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
|    426  int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
|    427  void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
|    451  bool kgd2kfd_device_init(struct kfd_dev *kfd,   in kgd2kfd_device_init() (argument)
|   [all …]
|
| amdgpu_amdkfd.c |
|    152  if (!adev->kfd.init_complete || adev->kfd.client.dev)   in amdgpu_amdkfd_drm_client_create()
|    175  if (adev->kfd.dev) {   in amdgpu_amdkfd_device_init()
|    226  adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,   in amdgpu_amdkfd_device_init()
|    247  if (adev->kfd.dev)   in amdgpu_amdkfd_interrupt()
|    253  if (adev->kfd.dev)   in amdgpu_amdkfd_suspend()
|    261  if (adev->kfd.dev)   in amdgpu_amdkfd_resume()
|    269  if (adev->kfd.dev)   in amdgpu_amdkfd_suspend_process()
|    277  if (adev->kfd.dev)   in amdgpu_amdkfd_resume_process()
|    288  if (adev->kfd.dev)   in amdgpu_amdkfd_pre_reset()
|    298  if (adev->kfd.dev)   in amdgpu_amdkfd_post_reset()
|   [all …]
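
The amdgpu_amdkfd.c hits all share one shape: each amdgpu_amdkfd_* entry point checks adev->kfd.dev (populated only when KFD probing succeeded) and only then forwards to the matching kgd2kfd_* call declared in amdgpu_amdkfd.h above; the header's second kgd2kfd_device_init() hit at its line 451 is presumably the static-inline stub used when KFD is not built in, so the same calls also compile when KFD is disabled. A sketch of that guard-and-forward pattern, using the suspend/resume-process pair whose prototypes appear in the header hits (the wrapper bodies are simplified illustrations, not the verbatim upstream functions):

    /* Guard-and-forward sketch; the kgd2kfd_* callee signatures match the
     * header hits, the wrapper bodies are reduced to the visible guard. */
    void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
    {
            if (adev->kfd.dev)
                    kgd2kfd_suspend_process(adev->kfd.dev);
    }

    int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
    {
            int r = 0;

            if (adev->kfd.dev)
                    r = kgd2kfd_resume_process(adev->kfd.dev);

            return r;
    }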
|
| amdgpu_amdkfd_gpuvm.c |
|    235  adev->kfd.vram_used[xcp_id] += vram_needed;   in amdgpu_amdkfd_reserve_mem_limit()
|    236  adev->kfd.vram_used_aligned[xcp_id] +=   in amdgpu_amdkfd_reserve_mem_limit()
|    264  adev->kfd.vram_used[xcp_id] -= size;   in amdgpu_amdkfd_unreserve_mem_limit()
|    266  adev->kfd.vram_used_aligned[xcp_id] -= size;   in amdgpu_amdkfd_unreserve_mem_limit()
|    270  adev->kfd.vram_used_aligned[xcp_id] -=   in amdgpu_amdkfd_unreserve_mem_limit()
|    282  WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,   in amdgpu_amdkfd_unreserve_mem_limit()
|   1630  - adev->kfd.vram_used_aligned[xcp_id]   in amdgpu_amdkfd_get_available_memory()
|   1828  drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);   in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
|   1943  drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);   in amdgpu_amdkfd_gpuvm_free_memory_of_gpu()
|   2402  obj = drm_gem_object_lookup(adev->kfd.client.file, handle);   in amdgpu_amdkfd_gpuvm_import_dmabuf_fd()
|   [all …]
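
The amdgpu_amdkfd_gpuvm.c hits show KFD's per-partition VRAM accounting: a reservation bumps both adev->kfd.vram_used[xcp_id] and the alignment-padded vram_used_aligned[xcp_id], a release decrements them and warns once if the raw counter goes negative, and the aligned counter is what amdgpu_amdkfd_get_available_memory() subtracts from the budget. A condensed sketch of that bookkeeping, with hypothetical helper names and the actual limit check and locking omitted:

    /* Hypothetical helpers condensing the reserve/unreserve accounting seen in
     * the hits; the real functions also enforce the memory limit under a lock. */
    static void kfd_vram_account_get(struct amdgpu_device *adev, int xcp_id,
                                     uint64_t vram_needed, uint64_t vram_aligned)
    {
            adev->kfd.vram_used[xcp_id] += vram_needed;
            adev->kfd.vram_used_aligned[xcp_id] += vram_aligned;
    }

    static void kfd_vram_account_put(struct amdgpu_device *adev, int xcp_id,
                                     uint64_t size, uint64_t size_aligned)
    {
            adev->kfd.vram_used[xcp_id] -= size;
            adev->kfd.vram_used_aligned[xcp_id] -= size_aligned;

            /* A negative raw counter means reserve/release got out of balance. */
            WARN_ONCE(adev->kfd.vram_used[xcp_id] < 0,
                      "KFD VRAM accounting unbalanced");
    }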
|