/linux-6.3-rc2/drivers/gpu/drm/amd/amdkfd/

Per-file matches for the identifier dqm. Each entry gives the source line number, the matching line, and (where applicable) the enclosing function; entries ending in [all …] are truncated.

kfd_device_queue_manager.c
  547   dqm->asic_ops.init_sdma_vm(dqm, q, qpd);   in create_queue_nocpsch()
  1180  retval = dqm->asic_ops.update_qpd(dqm, qpd);   in register_process()
  1316  r = pm_init(&dqm->packet_mgr, dqm);   in start_nocpsch()
  1493  retval = pm_init(&dqm->packet_mgr, dqm);   in start_cpsch()
  1556  kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);   in stop_cpsch()
  1645  dqm->asic_ops.init_sdma_vm(dqm, q, qpd);   in create_queue_cpsch()
  1774  if (dqm->is_hws_hang || dqm->is_resetting)   in unmap_queues_cpsch()
  1979  dqm,   in set_cache_memory_policy()
  2265  dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);   in device_queue_manager_init()
  2381  if (!dqm->ops.initialize(dqm))   in device_queue_manager_init()
  [all …]

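These hits outline the two dispatch tables a dqm carries: dqm->ops, selected by scheduling policy (hence the parallel *_nocpsch/*_cpsch call sites), and dqm->asic_ops, selected by GPU generation (see the per-ASIC files below). The following is a minimal user-space model of that double dispatch; the reduced struct layouts, the stub bodies of initialize_cpsch() and init_sdma_vm_v9(), and their printouts are illustrative stand-ins, not the kernel's code.

```c
#include <stdio.h>
#include <stdlib.h>

struct device_queue_manager;

struct device_queue_manager_ops {
	int (*initialize)(struct device_queue_manager *dqm);
};

struct device_queue_manager_asic_ops {
	void (*init_sdma_vm)(struct device_queue_manager *dqm);
};

struct device_queue_manager {
	struct device_queue_manager_ops ops;	   /* policy-specific */
	struct device_queue_manager_asic_ops asic_ops; /* GPU-generation-specific */
};

/* stand-ins for the policy- and ASIC-specific hooks */
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	(void)dqm;
	puts("HWS scheduling path initialized");
	return 0;
}

static void init_sdma_vm_v9(struct device_queue_manager *dqm)
{
	(void)dqm;
	puts("programming SDMA VM state, GFXv9 style");
}

int main(void)
{
	/* mirrors device_queue_manager_init(): zero-alloc, wire both tables */
	struct device_queue_manager *dqm = calloc(1, sizeof(*dqm));

	if (!dqm)
		return 1;
	dqm->ops.initialize = initialize_cpsch;
	dqm->asic_ops.init_sdma_vm = init_sdma_vm_v9;

	if (dqm->ops.initialize(dqm)) {		/* cf. the call at line 2381 */
		free(dqm);
		return 1;
	}
	dqm->asic_ops.init_sdma_vm(dqm);	/* cf. lines 547 and 1645 */
	free(dqm);
	return 0;
}
```
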
kfd_device_queue_manager.h
  131  int (*create_queue)(struct device_queue_manager *dqm,
  142  int (*update_queue)(struct device_queue_manager *dqm,
  151  int (*initialize)(struct device_queue_manager *dqm);
  152  int (*start)(struct device_queue_manager *dqm);
  153  int (*stop)(struct device_queue_manager *dqm);
  154  void (*pre_reset)(struct device_queue_manager *dqm);
  198  int (*update_qpd)(struct device_queue_manager *dqm,
  300  mutex_lock(&dqm->lock_hidden);   in dqm_lock()
  301  dqm->saved_flags = memalloc_noreclaim_save();   in dqm_lock()
  305  memalloc_noreclaim_restore(dqm->saved_flags);   in dqm_unlock()
  [all …]

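The last three hits show the locking discipline around the dqm: taking the lock also enters noreclaim allocation context until release. Below is a reconstruction from those fragments, with the struct reduced to the two fields involved; the rationale in the comment is our reading of why the pattern exists.

```c
#include <linux/mutex.h>
#include <linux/sched/mm.h>

struct device_queue_manager {
	struct mutex lock_hidden;	/* take via dqm_lock()/dqm_unlock() */
	unsigned int saved_flags;
	/* ... */
};

static inline void dqm_lock(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock_hidden);
	/* Forbid direct reclaim while the lock is held: reclaim can call
	 * back into eviction paths that also take this lock, so letting
	 * an allocation reclaim here could self-deadlock (our reading). */
	dqm->saved_flags = memalloc_noreclaim_save();
}

static inline void dqm_unlock(struct device_queue_manager *dqm)
{
	memalloc_noreclaim_restore(dqm->saved_flags);
	mutex_unlock(&dqm->lock_hidden);
}
```

Note the ordering: the noreclaim state is saved after the mutex is taken and restored before it is released, so it brackets exactly the critical section.
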
kfd_process_queue_manager.c
  89   dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);   in kfd_process_dequeue_from_device()
  138  return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,   in pqm_set_gws()
  277  dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);   in pqm_create_queue()
  333  retval = dev->dqm->ops.create_kernel_queue(dev->dqm,   in pqm_create_queue()
  381  dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);   in pqm_create_queue()
  393  dqm = NULL;   in pqm_destroy_queue()
  419  dqm = pqn->kq->dev->dqm;   in pqm_destroy_queue()
  420  dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);   in pqm_destroy_queue()
  426  dqm = pqn->q->device->dqm;   in pqm_destroy_queue()
  427  retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);   in pqm_destroy_queue()
  [all …]

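The pqm layer owns little queue logic of its own; every hit above forwards to a dqm op. A condensed, kernel-style sketch of the pqm_destroy_queue() dispatch visible in the last five hits (lookup, locking, and error handling omitted; the wrapper name is ours, types as in kfd_priv.h):

```c
/* A per-process queue node (pqn) holds either a kernel queue (kq) or a
 * user queue (q); the matching dqm op tears it down. */
static int pqm_destroy_queue_sketch(struct process_queue_node *pqn,
				    struct kfd_process_device *pdd)
{
	struct device_queue_manager *dqm = NULL;
	int retval = 0;

	if (pqn->kq) {
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
	} else if (pqn->q) {
		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
	}
	return retval;
}
```
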
kfd_device_queue_manager_vi.c
  30   static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
  36   static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
  42   static int update_qpd_vi(struct device_queue_manager *dqm,
  44   static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
  46   static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
  48   static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
  98   static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,   in set_cache_memory_policy_vi() argument
  154  static int update_qpd_vi(struct device_queue_manager *dqm,   in update_qpd_vi() argument
  195  static int update_qpd_vi_tonga(struct device_queue_manager *dqm,   in update_qpd_vi_tonga() argument
  229  static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,   in init_sdma_vm() argument
  [all …]

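Each per-ASIC file pairs every hook with a variant (here vi vs. vi_tonga) and exports only an init function that fills the asic_ops table with the static functions declared above. A sketch of that wiring, limited to the hooks that appear in these hits; the real tables carry more members than shown, and the exact Tonga differences (aperture and cache-policy programming) are not visible in this listing.

```c
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
	asic_ops->update_qpd = update_qpd_vi;
	asic_ops->init_sdma_vm = init_sdma_vm;
}

void device_queue_manager_init_vi_tonga(
		struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
	asic_ops->update_qpd = update_qpd_vi_tonga;
	asic_ops->init_sdma_vm = init_sdma_vm_tonga;
}
```
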
kfd_device_queue_manager_v9.c
  31  static int update_qpd_v9(struct device_queue_manager *dqm,
  33  static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
  53  static int update_qpd_v9(struct device_queue_manager *dqm,   in update_qpd_v9() argument
  65  if (dqm->dev->noretry && !dqm->dev->use_iommu_v2)   in update_qpd_v9()
  72  if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) {   in update_qpd_v9()
  87  static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,   in init_sdma_vm_v9() argument

kfd_packet_manager.c
  48   struct kfd_dev *dev = pm->dqm->dev;   in pm_calc_rlib_size()
  50   process_count = pm->dqm->processes_count;   in pm_calc_rlib_size()
  51   queue_count = pm->dqm->active_queue_count;   in pm_calc_rlib_size()
  52   compute_queue_count = pm->dqm->active_cp_queue_count;   in pm_calc_rlib_size()
  53   gws_queue_count = pm->dqm->gws_queue_count;   in pm_calc_rlib_size()
  146  pm->dqm->processes_count, pm->dqm->active_queue_count);   in pm_create_runlist_ib()
  152  if (processes_mapped >= pm->dqm->processes_count) {   in pm_create_runlist_ib()
  227  switch (dqm->dev->adev->asic_type) {   in pm_init()
  247  dqm->dev->adev->asic_type);   in pm_init()
  252  pm->dqm = dqm;   in pm_init()
  [all …]

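pm_calc_rlib_size() sizes the runlist indirect buffer from the dqm counters read at lines 48-53: roughly one MAP_PROCESS packet per registered process plus one MAP_QUEUES packet per active queue, with extra room for a chaining RUN_LIST packet when the queues oversubscribe the hardware. A back-of-the-envelope model follows; the packet sizes are made-up placeholders (the real ones come from the per-ASIC pm->pmf table) and the helper name is ours.

```c
#include <stdbool.h>

/* Illustrative sizes only; in the kernel they come from pm->pmf. */
#define MAP_PROCESS_SIZE	80u
#define MAP_QUEUES_SIZE		64u
#define RUN_LIST_SIZE		16u

static unsigned int calc_rlib_size(unsigned int process_count,
				   unsigned int queue_count,
				   bool over_subscription)
{
	unsigned int size = process_count * MAP_PROCESS_SIZE +
			    queue_count * MAP_QUEUES_SIZE;

	/* an oversubscribed runlist chains to a second one */
	if (over_subscription)
		size += RUN_LIST_SIZE;
	return size;
}
```
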
kfd_device_queue_manager_cik.c
  30   static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
  36   static int update_qpd_cik(struct device_queue_manager *dqm,
  38   static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
  40   static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
  42   static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
  91   static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,   in set_cache_memory_policy_cik() argument
  117  static int update_qpd_cik(struct device_queue_manager *dqm,   in update_qpd_cik() argument
  151  static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,   in update_qpd_cik_hawaii() argument
  181  static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,   in init_sdma_vm() argument
  197  static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,   in init_sdma_vm_hawaii() argument

kfd_mqd_manager.c
  57  mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;   in allocate_hiq_mqd()
  58  mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;   in allocate_hiq_mqd()
  59  mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;   in allocate_hiq_mqd()
  77  dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;   in allocate_sdma_mqd()
  79  offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;   in allocate_sdma_mqd()
  81  mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem   in allocate_sdma_mqd()
  83  mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;   in allocate_sdma_mqd()
  85  dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);   in allocate_sdma_mqd()

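allocate_sdma_mqd() carves per-queue MQDs out of the same shared GTT buffer that allocate_hiq_mqd() points into: the HIQ MQD sits at offset 0 and the SDMA MQDs follow, indexed by engine and queue id. The address math, modelled stand-alone below; the helper name and explicit size parameters are ours (in the kernel the sizes come from dev->dqm->mqd_mgrs[...]->mqd_size, as the hits show).

```c
#include <stdint.h>

/* Offset of one SDMA queue's MQD inside the shared hiq_sdma_mqd
 * buffer: skip the HIQ MQD, then index by (engine, queue) slot. */
static uint64_t sdma_mqd_offset(uint64_t hiq_mqd_size,
				uint64_t sdma_mqd_size,
				uint32_t sdma_engine_id,
				uint32_t queues_per_engine,
				uint32_t sdma_queue_id)
{
	uint64_t slot = (uint64_t)sdma_engine_id * queues_per_engine +
			sdma_queue_id;

	return hiq_mqd_size + slot * sdma_mqd_size;
}
```

Both the GPU address (line 83) and the CPU pointer (line 85) are then derived by adding this offset to the shared buffer's base.
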
kfd_device_queue_manager_v11.c
  29  static int update_qpd_v11(struct device_queue_manager *dqm,
  31  static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
  51  static int update_qpd_v11(struct device_queue_manager *dqm,   in update_qpd_v11() argument
  76  static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,   in init_sdma_vm_v11() argument

kfd_device_queue_manager_v10.c
  30  static int update_qpd_v10(struct device_queue_manager *dqm,
  32  static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
  52  static int update_qpd_v10(struct device_queue_manager *dqm,   in update_qpd_v10() argument
  76  static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,   in init_sdma_vm_v10() argument

kfd_device.c
  478   if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)   in kfd_gws_init()
  599   kfd->dqm = device_queue_manager_init(kfd);   in kgd2kfd_device_init()
  600   if (!kfd->dqm) {   in kgd2kfd_device_init()
  647   kfd->dqm->sched_policy);   in kgd2kfd_device_init()
  655   device_queue_manager_uninit(kfd->dqm);   in kgd2kfd_device_init()
  677   device_queue_manager_uninit(kfd->dqm);   in kgd2kfd_device_exit()
  698   kfd->dqm->ops.pre_reset(kfd->dqm);   in kgd2kfd_pre_reset()
  748   kfd->dqm->ops.stop(kfd->dqm);   in kgd2kfd_suspend()
  790   err = kfd->dqm->ops.start(kfd->dqm);   in kfd_resume()
  1154  if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {   in kfd_debugfs_hang_hws()
  [all …]

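Read together, these call sites trace the dqm lifecycle: created in kgd2kfd_device_init(), stopped on suspend, (re)started on resume, and notified via pre_reset before a GPU reset. A stand-alone sequence model with stubbed ops; the stub bodies and printouts are placeholders for what the real hooks do (roughly, mapping and unmapping the runlist), not the kernel's behaviour.

```c
#include <stdio.h>

struct dqm_ops {
	int  (*start)(void);
	int  (*stop)(void);
	void (*pre_reset)(void);
};

static int dqm_start(void)  { puts("map runlist, resume scheduling"); return 0; }
static int dqm_stop(void)   { puts("unmap runlist, halt scheduling"); return 0; }
static void dqm_pre_reset(void) { puts("flag reset in progress"); }

int main(void)
{
	struct dqm_ops ops = { dqm_start, dqm_stop, dqm_pre_reset };

	ops.start();	/* kfd_resume() after device init */
	ops.stop();	/* kgd2kfd_suspend() */
	ops.start();	/* resume again */
	ops.pre_reset();/* kgd2kfd_pre_reset(), GPU reset imminent */
	return 0;
}
```
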
kfd_process.c
  120   dqm = pdd->dev->dqm;   in kfd_sdma_activity_worker()
  122   if (!dqm || !qpd)   in kfd_sdma_activity_worker()
  151   dqm_lock(dqm);   in kfd_sdma_activity_worker()
  160   dqm_unlock(dqm);   in kfd_sdma_activity_worker()
  177   dqm_unlock(dqm);   in kfd_sdma_activity_worker()
  181   dqm_unlock(dqm);   in kfd_sdma_activity_worker()
  211   dqm_lock(dqm);   in kfd_sdma_activity_worker()
  233   dqm_unlock(dqm);   in kfd_sdma_activity_worker()
  1510  pdd->qpd.dqm = dev->dqm;   in kfd_create_process_device_data()
  1764  r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,   in kfd_process_evict_queues()
  [all …]

kfd_int_process_v9.c
  114  ret = kfd_dqm_evict_pasid(dev->dqm, pasid);   in event_interrupt_poison_consumption_v9()
  204  if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {   in event_interrupt_isr_v9()
  211  pasid = dev->dqm->vmid_pasid[vmid];   in event_interrupt_isr_v9()
  364  kfd_dqm_evict_pasid(dev->dqm, pasid);   in event_interrupt_wq_v9()

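Lines 204-211 show the vmid-to-pasid fallback: with HWS disabled the interrupt payload carries no PASID, so the handler recovers it from the dqm's vmid_pasid[] table before evicting the faulting process's queues. A simplified reconstruction (the wrapper name and reduced signature are ours):

```c
/* Sketch only, not the full interrupt handler. */
static void handle_vm_fault_sketch(struct kfd_dev *dev,
				   u32 pasid, unsigned int vmid)
{
	if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		pasid = dev->dqm->vmid_pasid[vmid];	/* line 211 */

	if (pasid)
		kfd_dqm_evict_pasid(dev->dqm, pasid);	/* line 364 */
}
```
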
kfd_int_process_v11.c
  210  if (dev->dqm->ops.reset_queues)   in event_interrupt_poison_consumption_v11()
  211  ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);   in event_interrupt_poison_consumption_v11()

kfd_priv.h
  299   struct device_queue_manager *dqm;   member
  611   struct device_queue_manager *dqm;   member
  1189  void device_queue_manager_uninit(struct device_queue_manager *dqm);
  1193  int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
  1247  struct device_queue_manager *dqm;   member
  1289  int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
  1392  int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

kfd_packet_manager_v9.c
  122  struct kfd_dev *kfd = pm->dqm->dev;   in pm_runlist_v9()
  133  concurrent_proc_cnt = min(pm->dqm->processes_count,   in pm_runlist_v9()
  266  packet->bitfields2.extended_engine_sel = pm_use_ext_eng(pm->dqm->dev) ?   in pm_unmap_queues_v9()

kfd_packet_manager_vi.c
  80  struct kfd_dev *kfd = pm->dqm->dev;   in pm_runlist_vi()
  94  concurrent_proc_cnt = min(pm->dqm->processes_count,   in pm_runlist_vi()

cik_event_interrupt.c
  113  kfd_dqm_evict_pasid(dev->dqm, pasid);   in cik_event_interrupt_wq()

kfd_kernel_queue.c
  65  kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_DIQ];   in kq_initialize()
  68  kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];   in kq_initialize()

kfd_topology.c
  1942  dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?   in kfd_topology_add_device()
  1944  dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);   in kfd_topology_add_device()
  2216  r = dqm_debugfs_hqds(m, dev->gpu->dqm);   in kfd_debugfs_hqds_by_device()
  2241  r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr);   in kfd_debugfs_rls_by_device()

kfd_chardev.c
  597   if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,   in kfd_ioctl_set_memory_policy()
  911   if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&   in kfd_ioctl_set_scratch_backing_va()
  1461  if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {   in kfd_ioctl_alloc_queue_gws()

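All three ioctl hits gate on the dqm's scheduling policy: with HWS disabled there is no firmware scheduler, so scratch backing is programmed directly per VMID (line 911) and features that depend on HWS, such as GWS, are refused (line 1461). A minimal sketch of the last case; the wrapper name and the exact error code are our assumptions.

```c
/* Sketch only; the real check and locking live in
 * kfd_ioctl_alloc_queue_gws(). */
static int alloc_queue_gws_sketch(struct kfd_dev *dev)
{
	/* GWS relies on the HW scheduler; refuse without it */
	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return -ENODEV;	/* errno is an assumption */

	/* ... look up the queue and bind the GWS object ... */
	return 0;
}
```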