Lines Matching refs:pdd (references to struct kfd_process_device in the amdkfd ioctl layer, kfd_chardev.c)

71 	struct kfd_process_device *pdd;  in kfd_lock_pdd_by_id()  local
74 pdd = kfd_process_device_data_by_id(p, gpu_id); in kfd_lock_pdd_by_id()
76 if (pdd) in kfd_lock_pdd_by_id()
77 return pdd; in kfd_lock_pdd_by_id()
83 static inline void kfd_unlock_pdd(struct kfd_process_device *pdd) in kfd_unlock_pdd() argument
85 mutex_unlock(&pdd->process->mutex); in kfd_unlock_pdd()
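The two helpers above form the lock/lookup pair used by the simpler ioctls: kfd_lock_pdd_by_id() takes the process mutex, resolves the pdd for a gpu_id and returns it with the lock still held, and kfd_unlock_pdd() releases it via pdd->process. A minimal reconstruction from the fragments above; the mutex_lock() on p->mutex and the NULL return path are inferred, not shown in the listing:

	static struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p,
							      __u32 gpu_id)
	{
		struct kfd_process_device *pdd;

		mutex_lock(&p->mutex);
		pdd = kfd_process_device_data_by_id(p, gpu_id);

		if (pdd)
			return pdd;	/* success: p->mutex stays held for the caller */

		mutex_unlock(&p->mutex);
		return NULL;
	}

	static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
	{
		mutex_unlock(&pdd->process->mutex);
	}

kfd_ioctl_get_available_memory() (lines 1041-1047 below) is the caller of this pair in this listing.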
312 struct kfd_process_device *pdd; in kfd_ioctl_create_queue() local
328 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_create_queue()
329 if (!pdd) { in kfd_ioctl_create_queue()
334 dev = pdd->dev; in kfd_ioctl_create_queue()
336 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_create_queue()
337 if (IS_ERR(pdd)) { in kfd_ioctl_create_queue()
354 if (!pdd->qpd.proc_doorbells) { in kfd_ioctl_create_queue()
355 err = kfd_alloc_process_doorbells(dev->kfd, pdd); in kfd_ioctl_create_queue()
362 err = kfd_queue_acquire_buffers(pdd, &q_properties); in kfd_ioctl_create_queue()
406 kfd_queue_unref_bo_vas(pdd, &q_properties); in kfd_ioctl_create_queue()
407 kfd_queue_release_buffers(pdd, &q_properties); in kfd_ioctl_create_queue()
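kfd_ioctl_create_queue() shows the lookup-then-bind skeleton that most gpu_id-based ioctls in this listing repeat: resolve the pdd under the process mutex, bind the process to the device, lazily allocate the process doorbells, then acquire the queue buffers. A sketch of that skeleton; only the calls themselves appear in the fragments, the error codes and goto labels are assumptions:

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;			/* assumed error code */
		goto err_unlock;		/* assumed label */
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {			/* bind returns an ERR_PTR on failure */
		err = PTR_ERR(pdd);
		goto err_unlock;
	}

	if (!pdd->qpd.proc_doorbells) {		/* doorbell pages are allocated lazily */
		err = kfd_alloc_process_doorbells(dev->kfd, pdd);
		if (err)
			goto err_unlock;
	}

	err = kfd_queue_acquire_buffers(pdd, &q_properties);
	if (err)
		goto err_unlock;
	/* ... queue creation; the error path at lines 406-407 undoes this with
	 * kfd_queue_unref_bo_vas() and kfd_queue_release_buffers() ... */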
569 struct kfd_process_device *pdd; in kfd_ioctl_set_memory_policy() local
583 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_memory_policy()
584 if (!pdd) { in kfd_ioctl_set_memory_policy()
590 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_memory_policy()
591 if (IS_ERR(pdd)) { in kfd_ioctl_set_memory_policy()
603 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm, in kfd_ioctl_set_memory_policy()
604 &pdd->qpd, in kfd_ioctl_set_memory_policy()
624 struct kfd_process_device *pdd; in kfd_ioctl_set_trap_handler() local
628 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_trap_handler()
629 if (!pdd) { in kfd_ioctl_set_trap_handler()
634 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_trap_handler()
635 if (IS_ERR(pdd)) { in kfd_ioctl_set_trap_handler()
640 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr); in kfd_ioctl_set_trap_handler()
678 struct kfd_process_device *pdd; in kfd_ioctl_get_clock_counters() local
681 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_get_clock_counters()
683 if (pdd) in kfd_ioctl_get_clock_counters()
685 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev); in kfd_ioctl_get_clock_counters()
715 struct kfd_process_device *pdd = p->pdds[i]; in kfd_ioctl_get_process_apertures() local
719 pAperture->gpu_id = pdd->dev->id; in kfd_ioctl_get_process_apertures()
720 pAperture->lds_base = pdd->lds_base; in kfd_ioctl_get_process_apertures()
721 pAperture->lds_limit = pdd->lds_limit; in kfd_ioctl_get_process_apertures()
722 pAperture->gpuvm_base = pdd->gpuvm_base; in kfd_ioctl_get_process_apertures()
723 pAperture->gpuvm_limit = pdd->gpuvm_limit; in kfd_ioctl_get_process_apertures()
724 pAperture->scratch_base = pdd->scratch_base; in kfd_ioctl_get_process_apertures()
725 pAperture->scratch_limit = pdd->scratch_limit; in kfd_ioctl_get_process_apertures()
730 "gpu id %u\n", pdd->dev->id); in kfd_ioctl_get_process_apertures()
732 "lds_base %llX\n", pdd->lds_base); in kfd_ioctl_get_process_apertures()
734 "lds_limit %llX\n", pdd->lds_limit); in kfd_ioctl_get_process_apertures()
736 "gpuvm_base %llX\n", pdd->gpuvm_base); in kfd_ioctl_get_process_apertures()
738 "gpuvm_limit %llX\n", pdd->gpuvm_limit); in kfd_ioctl_get_process_apertures()
740 "scratch_base %llX\n", pdd->scratch_base); in kfd_ioctl_get_process_apertures()
742 "scratch_limit %llX\n", pdd->scratch_limit); in kfd_ioctl_get_process_apertures()
791 struct kfd_process_device *pdd = p->pdds[i]; in kfd_ioctl_get_process_apertures_new() local
793 pa[i].gpu_id = pdd->dev->id; in kfd_ioctl_get_process_apertures_new()
794 pa[i].lds_base = pdd->lds_base; in kfd_ioctl_get_process_apertures_new()
795 pa[i].lds_limit = pdd->lds_limit; in kfd_ioctl_get_process_apertures_new()
796 pa[i].gpuvm_base = pdd->gpuvm_base; in kfd_ioctl_get_process_apertures_new()
797 pa[i].gpuvm_limit = pdd->gpuvm_limit; in kfd_ioctl_get_process_apertures_new()
798 pa[i].scratch_base = pdd->scratch_base; in kfd_ioctl_get_process_apertures_new()
799 pa[i].scratch_limit = pdd->scratch_limit; in kfd_ioctl_get_process_apertures_new()
802 "gpu id %u\n", pdd->dev->id); in kfd_ioctl_get_process_apertures_new()
804 "lds_base %llX\n", pdd->lds_base); in kfd_ioctl_get_process_apertures_new()
806 "lds_limit %llX\n", pdd->lds_limit); in kfd_ioctl_get_process_apertures_new()
808 "gpuvm_base %llX\n", pdd->gpuvm_base); in kfd_ioctl_get_process_apertures_new()
810 "gpuvm_limit %llX\n", pdd->gpuvm_limit); in kfd_ioctl_get_process_apertures_new()
812 "scratch_base %llX\n", pdd->scratch_base); in kfd_ioctl_get_process_apertures_new()
814 "scratch_limit %llX\n", pdd->scratch_limit); in kfd_ioctl_get_process_apertures_new()
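Both aperture ioctls copy the cached per-device aperture ranges straight out of each pdd; only the destination differs (a fixed-size pAperture array in the old ioctl, a user-sized pa[] array in the _new one). A sketch of the copy loop, with the p->n_pdds bound assumed:

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pa[i].gpu_id        = pdd->dev->id;
		pa[i].lds_base      = pdd->lds_base;
		pa[i].lds_limit     = pdd->lds_limit;
		pa[i].gpuvm_base    = pdd->gpuvm_base;
		pa[i].gpuvm_limit   = pdd->gpuvm_limit;
		pa[i].scratch_base  = pdd->scratch_base;
		pa[i].scratch_limit = pdd->scratch_limit;
	}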
897 struct kfd_process_device *pdd; in kfd_ioctl_set_scratch_backing_va() local
902 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_scratch_backing_va()
903 if (!pdd) { in kfd_ioctl_set_scratch_backing_va()
907 dev = pdd->dev; in kfd_ioctl_set_scratch_backing_va()
909 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_set_scratch_backing_va()
910 if (IS_ERR(pdd)) { in kfd_ioctl_set_scratch_backing_va()
911 err = PTR_ERR(pdd); in kfd_ioctl_set_scratch_backing_va()
915 pdd->qpd.sh_hidden_private_base = args->va_addr; in kfd_ioctl_set_scratch_backing_va()
920 pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) in kfd_ioctl_set_scratch_backing_va()
922 dev->adev, args->va_addr, pdd->qpd.vmid); in kfd_ioctl_set_scratch_backing_va()
936 struct kfd_process_device *pdd; in kfd_ioctl_get_tile_config() local
941 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_get_tile_config()
943 if (!pdd) in kfd_ioctl_get_tile_config()
946 amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config); in kfd_ioctl_get_tile_config()
980 struct kfd_process_device *pdd; in kfd_ioctl_acquire_vm() local
989 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_acquire_vm()
990 if (!pdd) { in kfd_ioctl_acquire_vm()
995 if (pdd->drm_file) { in kfd_ioctl_acquire_vm()
996 ret = pdd->drm_file == drm_file ? 0 : -EBUSY; in kfd_ioctl_acquire_vm()
1000 ret = kfd_process_device_init_vm(pdd, drm_file); in kfd_ioctl_acquire_vm()
1041 struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id); in kfd_ioctl_get_available_memory() local
1043 if (!pdd) in kfd_ioctl_get_available_memory()
1045 args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev, in kfd_ioctl_get_available_memory()
1046 pdd->dev->node_id); in kfd_ioctl_get_available_memory()
1047 kfd_unlock_pdd(pdd); in kfd_ioctl_get_available_memory()
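kfd_ioctl_get_available_memory() is the one caller in this listing that uses the lock/unlock helper pair directly. A reconstruction consistent with the fragments; the args struct name and the return codes are assumptions:

	static int kfd_ioctl_get_available_memory(struct file *filep,
						  struct kfd_process *p, void *data)
	{
		struct kfd_ioctl_get_available_memory_args *args = data;
		struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);

		if (!pdd)
			return -EINVAL;		/* assumed error code */
		args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
								     pdd->dev->node_id);
		kfd_unlock_pdd(pdd);		/* drops p->mutex taken by the lookup */
		return 0;
	}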
1055 struct kfd_process_device *pdd; in kfd_ioctl_alloc_memory_of_gpu() local
1098 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
1099 if (!pdd) { in kfd_ioctl_alloc_memory_of_gpu()
1104 dev = pdd->dev; in kfd_ioctl_alloc_memory_of_gpu()
1114 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_alloc_memory_of_gpu()
1115 if (IS_ERR(pdd)) { in kfd_ioctl_alloc_memory_of_gpu()
1116 err = PTR_ERR(pdd); in kfd_ioctl_alloc_memory_of_gpu()
1125 offset = kfd_get_process_doorbells(pdd); in kfd_ioctl_alloc_memory_of_gpu()
1144 pdd->drm_priv, (struct kgd_mem **) &mem, &offset, in kfd_ioctl_alloc_memory_of_gpu()
1150 idr_handle = kfd_process_device_create_obj_handle(pdd, mem); in kfd_ioctl_alloc_memory_of_gpu()
1162 atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage); in kfd_ioctl_alloc_memory_of_gpu()
1181 pdd->drm_priv, NULL); in kfd_ioctl_alloc_memory_of_gpu()
1193 struct kfd_process_device *pdd; in kfd_ioctl_free_memory_of_gpu() local
1209 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1210 if (!pdd) { in kfd_ioctl_free_memory_of_gpu()
1217 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1223 ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, in kfd_ioctl_free_memory_of_gpu()
1224 (struct kgd_mem *)mem, pdd->drm_priv, &size); in kfd_ioctl_free_memory_of_gpu()
1231 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1233 atomic64_sub(size, &pdd->vram_usage); in kfd_ioctl_free_memory_of_gpu()
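Allocation and free keep pdd->vram_usage balanced: the alloc path adds the page-aligned request (line 1162), while the free path lets amdgpu_amdkfd_gpuvm_free_memory_of_gpu() report the actual freed size back and subtracts that (lines 1223-1233). In sketch form, with the success check assumed:

	/* kfd_ioctl_alloc_memory_of_gpu(): account the page-aligned allocation */
	atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);

	/* kfd_ioctl_free_memory_of_gpu(): the free call returns the freed size */
	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
						     (struct kgd_mem *)mem,
						     pdd->drm_priv, &size);
	if (!ret)
		atomic64_sub(size, &pdd->vram_usage);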
1245 struct kfd_process_device *pdd, *peer_pdd; in kfd_ioctl_map_memory_to_gpu() local
1275 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1276 if (!pdd) { in kfd_ioctl_map_memory_to_gpu()
1280 dev = pdd->dev; in kfd_ioctl_map_memory_to_gpu()
1282 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_map_memory_to_gpu()
1283 if (IS_ERR(pdd)) { in kfd_ioctl_map_memory_to_gpu()
1284 err = PTR_ERR(pdd); in kfd_ioctl_map_memory_to_gpu()
1288 mem = kfd_process_device_translate_handle(pdd, in kfd_ioctl_map_memory_to_gpu()
1363 struct kfd_process_device *pdd, *peer_pdd; in kfd_ioctl_unmap_memory_from_gpu() local
1392 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1393 if (!pdd) { in kfd_ioctl_unmap_memory_from_gpu()
1398 mem = kfd_process_device_translate_handle(pdd, in kfd_ioctl_unmap_memory_from_gpu()
1420 flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd); in kfd_ioctl_unmap_memory_from_gpu()
1422 err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev, in kfd_ioctl_unmap_memory_from_gpu()
1562 struct kfd_process_device *pdd; in kfd_ioctl_import_dmabuf() local
1569 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_import_dmabuf()
1570 if (!pdd) { in kfd_ioctl_import_dmabuf()
1575 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_import_dmabuf()
1576 if (IS_ERR(pdd)) { in kfd_ioctl_import_dmabuf()
1577 r = PTR_ERR(pdd); in kfd_ioctl_import_dmabuf()
1581 r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd, in kfd_ioctl_import_dmabuf()
1582 args->va_addr, pdd->drm_priv, in kfd_ioctl_import_dmabuf()
1588 idr_handle = kfd_process_device_create_obj_handle(pdd, mem); in kfd_ioctl_import_dmabuf()
1601 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem, in kfd_ioctl_import_dmabuf()
1602 pdd->drm_priv, NULL); in kfd_ioctl_import_dmabuf()
1612 struct kfd_process_device *pdd; in kfd_ioctl_export_dmabuf() local
1624 pdd = kfd_get_process_device_data(dev, p); in kfd_ioctl_export_dmabuf()
1625 if (!pdd) { in kfd_ioctl_export_dmabuf()
1630 mem = kfd_process_device_translate_handle(pdd, in kfd_ioctl_export_dmabuf()
1665 struct kfd_process_device *pdd; in kfd_ioctl_smi_events() local
1669 pdd = kfd_process_device_data_by_id(p, args->gpuid); in kfd_ioctl_smi_events()
1671 if (!pdd) in kfd_ioctl_smi_events()
1674 return kfd_smi_event_open(pdd->dev, &args->anon_fd); in kfd_ioctl_smi_events()
1793 struct kfd_process_device *pdd = p->pdds[i]; in criu_checkpoint_devices() local
1795 device_buckets[i].user_gpu_id = pdd->user_gpu_id; in criu_checkpoint_devices()
1796 device_buckets[i].actual_gpu_id = pdd->dev->id; in criu_checkpoint_devices()
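criu_checkpoint_devices() records two ids per device: the gpu_id the process was given (user_gpu_id) and the id of the device it was actually bound to, so criu_restore_devices() (lines 2217-2278) can rebind the same user-visible id onto whatever hardware is present at restore time. A sketch of the checkpoint side, with the loop bound assumed:

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		device_buckets[i].user_gpu_id   = pdd->user_gpu_id;
		device_buckets[i].actual_gpu_id = pdd->dev->id;
	}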
1833 struct kfd_process_device *pdd = p->pdds[i]; in get_process_num_bos() local
1837 idr_for_each_entry(&pdd->alloc_idr, mem, id) { in get_process_num_bos()
1840 if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base) in get_process_num_bos()
1923 struct kfd_process_device *pdd = p->pdds[pdd_index]; in criu_checkpoint_bos() local
1927 idr_for_each_entry(&pdd->alloc_idr, mem, id) { in criu_checkpoint_bos()
1939 if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base) in criu_checkpoint_bos()
1945 bo_bucket->gpu_id = pdd->user_gpu_id; in criu_checkpoint_bos()
1973 KFD_MMAP_GPU_ID(pdd->dev->id); in criu_checkpoint_bos()
1977 KFD_MMAP_GPU_ID(pdd->dev->id); in criu_checkpoint_bos()
2217 struct kfd_process_device *pdd; in criu_restore_devices() local
2236 pdd = kfd_get_process_device_data(dev, p); in criu_restore_devices()
2237 if (!pdd) { in criu_restore_devices()
2243 pdd->user_gpu_id = device_buckets[i].user_gpu_id; in criu_restore_devices()
2253 if (pdd->drm_file) { in criu_restore_devices()
2259 if (kfd_process_device_init_vm(pdd, drm_file)) { in criu_restore_devices()
2271 pdd = kfd_bind_process_to_device(dev, p); in criu_restore_devices()
2272 if (IS_ERR(pdd)) { in criu_restore_devices()
2273 ret = PTR_ERR(pdd); in criu_restore_devices()
2277 if (!pdd->qpd.proc_doorbells) { in criu_restore_devices()
2278 ret = kfd_alloc_process_doorbells(dev->kfd, pdd); in criu_restore_devices()
2295 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, in criu_restore_memory_of_gpu() argument
2307 kfd_doorbell_process_slice(pdd->dev->kfd)) in criu_restore_memory_of_gpu()
2310 offset = kfd_get_process_doorbells(pdd); in criu_restore_memory_of_gpu()
2319 offset = pdd->dev->adev->rmmio_remap.bus_addr; in criu_restore_memory_of_gpu()
2328 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr, in criu_restore_memory_of_gpu()
2329 bo_bucket->size, pdd->drm_priv, kgd_mem, in criu_restore_memory_of_gpu()
2340 idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle, in criu_restore_memory_of_gpu()
2345 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv, in criu_restore_memory_of_gpu()
2351 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id); in criu_restore_memory_of_gpu()
2353 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id); in criu_restore_memory_of_gpu()
2359 atomic64_add(bo_bucket->size, &pdd->vram_usage); in criu_restore_memory_of_gpu()
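criu_restore_memory_of_gpu() does not restore the mmap offsets of doorbell and MMIO BOs verbatim; it re-encodes them from the restore-side device id (lines 2351-2353) and re-accounts the VRAM usage (line 2359). A sketch of that final step; the alloc_flags tests are assumptions based on the two offset types shown:

	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
		bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL |
					     KFD_MMAP_GPU_ID(pdd->dev->id);
	else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
		bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO |
					     KFD_MMAP_GPU_ID(pdd->dev->id);

	/* the restored BO counts against this device's budget again */
	atomic64_add(bo_bucket->size, &pdd->vram_usage);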
2369 struct kfd_process_device *pdd; in criu_restore_bo() local
2378 pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id); in criu_restore_bo()
2379 if (!pdd) { in criu_restore_bo()
2384 ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem); in criu_restore_bo()
2771 struct kfd_process_device *pdd = p->pdds[i]; in runtime_enable() local
2773 if (pdd->qpd.queue_count) in runtime_enable()
2783 if (pdd->dev->kfd->shared_resources.enable_mes) in runtime_enable()
2784 kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); in runtime_enable()
2793 struct kfd_process_device *pdd = p->pdds[i]; in runtime_enable() local
2795 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) { in runtime_enable()
2796 amdgpu_gfx_off_ctrl(pdd->dev->adev, false); in runtime_enable()
2797 pdd->dev->kfd2kgd->enable_debug_trap( in runtime_enable()
2798 pdd->dev->adev, in runtime_enable()
2800 pdd->dev->vm_info.last_vmid_kfd); in runtime_enable()
2801 } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { in runtime_enable()
2802 pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap( in runtime_enable()
2803 pdd->dev->adev, in runtime_enable()
2855 struct kfd_process_device *pdd = p->pdds[i]; in runtime_disable() local
2857 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) in runtime_disable()
2858 amdgpu_gfx_off_ctrl(pdd->dev->adev, true); in runtime_disable()
2866 struct kfd_process_device *pdd = p->pdds[i]; in runtime_disable() local
2868 if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { in runtime_disable()
2869 pdd->spi_dbg_override = in runtime_disable()
2870 pdd->dev->kfd2kgd->disable_debug_trap( in runtime_disable()
2871 pdd->dev->adev, in runtime_disable()
2873 pdd->dev->vm_info.last_vmid_kfd); in runtime_disable()
2875 if (!pdd->dev->kfd->shared_resources.enable_mes) in runtime_disable()
2876 debug_refresh_runlist(pdd->dev->dqm); in runtime_disable()
2878 kfd_dbg_set_mes_debug_mode(pdd, in runtime_disable()
2879 !kfd_dbg_has_cwsr_workaround(pdd->dev)); in runtime_disable()
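runtime_enable() and runtime_disable() end each per-pdd pass by pushing the new debug state to the scheduler: HWS runlists are refreshed on pre-MES hardware, the MES debug mode is updated otherwise. The if/else pairing below is inferred from the adjacent fragments (lines 2875-2879 and 2783-2784):

	if (!pdd->dev->kfd->shared_resources.enable_mes)
		debug_refresh_runlist(pdd->dev->dqm);
	else
		kfd_dbg_set_mes_debug_mode(pdd,
					   !kfd_dbg_has_cwsr_workaround(pdd->dev));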
2911 struct kfd_process_device *pdd = NULL; in kfd_ioctl_set_debug_trap() local
2997 pdd = kfd_process_device_data_by_id(target, user_gpu_id); in kfd_ioctl_set_debug_trap()
2998 if (user_gpu_id == -EINVAL || !pdd) { in kfd_ioctl_set_debug_trap()
3055 r = kfd_dbg_trap_set_dev_address_watch(pdd, in kfd_ioctl_set_debug_trap()
3062 r = kfd_dbg_trap_clear_dev_address_watch(pdd, in kfd_ioctl_set_debug_trap()