Lines matching references to pdd (struct kfd_process_device *)
70 struct kfd_process_device *pdd; in kfd_lock_pdd_by_id() local
73 pdd = kfd_process_device_data_by_id(p, gpu_id); in kfd_lock_pdd_by_id()
75 if (pdd) in kfd_lock_pdd_by_id()
76 return pdd; in kfd_lock_pdd_by_id()
82 static inline void kfd_unlock_pdd(struct kfd_process_device *pdd) in kfd_unlock_pdd() argument
84 mutex_unlock(&pdd->process->mutex); in kfd_unlock_pdd()
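
The matches at lines 70-84 belong to a small lock/unlock helper pair: kfd_lock_pdd_by_id() takes the process mutex, resolves a gpu_id to its pdd, and returns with the mutex still held on success, while kfd_unlock_pdd() releases it through pdd->process->mutex. The lines the match list skips (the lock helper's signature, the mutex_lock on entry, and the unlock-and-return-NULL failure path) are filled in below as assumptions, so treat this as a sketch rather than the verbatim source:

static struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p,
						     __u32 gpu_id)
{
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);			/* assumed: not in the match list */
	pdd = kfd_process_device_data_by_id(p, gpu_id);

	if (pdd)
		return pdd;			/* success: p->mutex stays held */

	mutex_unlock(&p->mutex);		/* assumed failure path */
	return NULL;
}

static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
{
	mutex_unlock(&pdd->process->mutex);
}
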
299 struct kfd_process_device *pdd; in kfd_ioctl_create_queue() local
316 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_create_queue()
317 if (!pdd) { in kfd_ioctl_create_queue()
322 dev = pdd->dev; in kfd_ioctl_create_queue()
324 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_create_queue()
325 if (IS_ERR(pdd)) { in kfd_ioctl_create_queue()
330 if (!pdd->doorbell_index && in kfd_ioctl_create_queue()
331 kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) { in kfd_ioctl_create_queue()
345 wptr_vm = drm_priv_to_vm(pdd->drm_priv); in kfd_ioctl_create_queue()
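
The kfd_ioctl_create_queue() matches (lines 299-345) show the sequence most of the ioctls below repeat: resolve the user-supplied gpu_id to a pdd, bind the process to that device, and lazily allocate the process doorbell page before touching queue state. A condensed sketch; the error labels and the -EINVAL/-ENOMEM codes are assumptions, since only the quoted fragments appear in the match list:

	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;			/* unknown gpu_id; code assumed */
		goto err_pdd;			/* label name assumed */
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto err_bind_process;		/* label name assumed */
	}

	/* First queue on this device: allocate the process doorbell page. */
	if (!pdd->doorbell_index &&
	    kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
		err = -ENOMEM;
		goto err_bind_process;
	}

	/* ... queue argument validation elided ... */

	/* Line 345: fetch the amdgpu VM behind this pdd's DRM file, used to
	 * look up the queue write-pointer BO (surrounding context assumed). */
	wptr_vm = drm_priv_to_vm(pdd->drm_priv);
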
563 struct kfd_process_device *pdd; in kfd_ioctl_set_memory_policy() local
577 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_memory_policy()
578 if (!pdd) { in kfd_ioctl_set_memory_policy()
584 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_memory_policy()
585 if (IS_ERR(pdd)) { in kfd_ioctl_set_memory_policy()
597 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm, in kfd_ioctl_set_memory_policy()
598 &pdd->qpd, in kfd_ioctl_set_memory_policy()
617 struct kfd_process_device *pdd; in kfd_ioctl_set_trap_handler() local
621 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_trap_handler()
622 if (!pdd) { in kfd_ioctl_set_trap_handler()
627 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_trap_handler()
628 if (IS_ERR(pdd)) { in kfd_ioctl_set_trap_handler()
633 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr); in kfd_ioctl_set_trap_handler()
671 struct kfd_process_device *pdd; in kfd_ioctl_get_clock_counters() local
674 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_get_clock_counters()
676 if (pdd) in kfd_ioctl_get_clock_counters()
678 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev); in kfd_ioctl_get_clock_counters()
708 struct kfd_process_device *pdd = p->pdds[i]; in kfd_ioctl_get_process_apertures() local
712 pAperture->gpu_id = pdd->dev->id; in kfd_ioctl_get_process_apertures()
713 pAperture->lds_base = pdd->lds_base; in kfd_ioctl_get_process_apertures()
714 pAperture->lds_limit = pdd->lds_limit; in kfd_ioctl_get_process_apertures()
715 pAperture->gpuvm_base = pdd->gpuvm_base; in kfd_ioctl_get_process_apertures()
716 pAperture->gpuvm_limit = pdd->gpuvm_limit; in kfd_ioctl_get_process_apertures()
717 pAperture->scratch_base = pdd->scratch_base; in kfd_ioctl_get_process_apertures()
718 pAperture->scratch_limit = pdd->scratch_limit; in kfd_ioctl_get_process_apertures()
723 "gpu id %u\n", pdd->dev->id); in kfd_ioctl_get_process_apertures()
725 "lds_base %llX\n", pdd->lds_base); in kfd_ioctl_get_process_apertures()
727 "lds_limit %llX\n", pdd->lds_limit); in kfd_ioctl_get_process_apertures()
729 "gpuvm_base %llX\n", pdd->gpuvm_base); in kfd_ioctl_get_process_apertures()
731 "gpuvm_limit %llX\n", pdd->gpuvm_limit); in kfd_ioctl_get_process_apertures()
733 "scratch_base %llX\n", pdd->scratch_base); in kfd_ioctl_get_process_apertures()
735 "scratch_limit %llX\n", pdd->scratch_limit); in kfd_ioctl_get_process_apertures()
783 struct kfd_process_device *pdd = p->pdds[i]; in kfd_ioctl_get_process_apertures_new() local
785 pa[i].gpu_id = pdd->dev->id; in kfd_ioctl_get_process_apertures_new()
786 pa[i].lds_base = pdd->lds_base; in kfd_ioctl_get_process_apertures_new()
787 pa[i].lds_limit = pdd->lds_limit; in kfd_ioctl_get_process_apertures_new()
788 pa[i].gpuvm_base = pdd->gpuvm_base; in kfd_ioctl_get_process_apertures_new()
789 pa[i].gpuvm_limit = pdd->gpuvm_limit; in kfd_ioctl_get_process_apertures_new()
790 pa[i].scratch_base = pdd->scratch_base; in kfd_ioctl_get_process_apertures_new()
791 pa[i].scratch_limit = pdd->scratch_limit; in kfd_ioctl_get_process_apertures_new()
794 "gpu id %u\n", pdd->dev->id); in kfd_ioctl_get_process_apertures_new()
796 "lds_base %llX\n", pdd->lds_base); in kfd_ioctl_get_process_apertures_new()
798 "lds_limit %llX\n", pdd->lds_limit); in kfd_ioctl_get_process_apertures_new()
800 "gpuvm_base %llX\n", pdd->gpuvm_base); in kfd_ioctl_get_process_apertures_new()
802 "gpuvm_limit %llX\n", pdd->gpuvm_limit); in kfd_ioctl_get_process_apertures_new()
804 "scratch_base %llX\n", pdd->scratch_base); in kfd_ioctl_get_process_apertures_new()
806 "scratch_limit %llX\n", pdd->scratch_limit); in kfd_ioctl_get_process_apertures_new()
889 struct kfd_process_device *pdd; in kfd_ioctl_set_scratch_backing_va() local
894 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_scratch_backing_va()
895 if (!pdd) { in kfd_ioctl_set_scratch_backing_va()
899 dev = pdd->dev; in kfd_ioctl_set_scratch_backing_va()
901 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_set_scratch_backing_va()
902 if (IS_ERR(pdd)) { in kfd_ioctl_set_scratch_backing_va()
903 err = PTR_ERR(pdd); in kfd_ioctl_set_scratch_backing_va()
907 pdd->qpd.sh_hidden_private_base = args->va_addr; in kfd_ioctl_set_scratch_backing_va()
912 pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) in kfd_ioctl_set_scratch_backing_va()
914 dev->adev, args->va_addr, pdd->qpd.vmid); in kfd_ioctl_set_scratch_backing_va()
928 struct kfd_process_device *pdd; in kfd_ioctl_get_tile_config() local
933 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_get_tile_config()
935 if (!pdd) in kfd_ioctl_get_tile_config()
938 amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config); in kfd_ioctl_get_tile_config()
972 struct kfd_process_device *pdd; in kfd_ioctl_acquire_vm() local
981 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_acquire_vm()
982 if (!pdd) { in kfd_ioctl_acquire_vm()
987 if (pdd->drm_file) { in kfd_ioctl_acquire_vm()
988 ret = pdd->drm_file == drm_file ? 0 : -EBUSY; in kfd_ioctl_acquire_vm()
992 ret = kfd_process_device_init_vm(pdd, drm_file); in kfd_ioctl_acquire_vm()
1029 struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id); in kfd_ioctl_get_available_memory() local
1031 if (!pdd) in kfd_ioctl_get_available_memory()
1033 args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev); in kfd_ioctl_get_available_memory()
1034 kfd_unlock_pdd(pdd); in kfd_ioctl_get_available_memory()
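
kfd_ioctl_get_available_memory() (lines 1029-1034) is the one caller here of the kfd_lock_pdd_by_id()/kfd_unlock_pdd() pair from the top of the list, and its body is almost entirely visible in the matches. Only the handler signature, the args type, and the return codes are filled in below as assumptions (the signature follows the other kfd_ioctl_* handlers):

static int kfd_ioctl_get_available_memory(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_available_memory_args *args = data;	/* type assumed */
	struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);

	if (!pdd)
		return -EINVAL;		/* code assumed */
	args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev);
	kfd_unlock_pdd(pdd);
	return 0;
}
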
1042 struct kfd_process_device *pdd; in kfd_ioctl_alloc_memory_of_gpu() local
1085 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
1086 if (!pdd) { in kfd_ioctl_alloc_memory_of_gpu()
1091 dev = pdd->dev; in kfd_ioctl_alloc_memory_of_gpu()
1101 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_alloc_memory_of_gpu()
1102 if (IS_ERR(pdd)) { in kfd_ioctl_alloc_memory_of_gpu()
1103 err = PTR_ERR(pdd); in kfd_ioctl_alloc_memory_of_gpu()
1112 offset = kfd_get_process_doorbells(pdd); in kfd_ioctl_alloc_memory_of_gpu()
1131 pdd->drm_priv, (struct kgd_mem **) &mem, &offset, in kfd_ioctl_alloc_memory_of_gpu()
1137 idr_handle = kfd_process_device_create_obj_handle(pdd, mem); in kfd_ioctl_alloc_memory_of_gpu()
1149 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size)); in kfd_ioctl_alloc_memory_of_gpu()
1168 pdd->drm_priv, NULL); in kfd_ioctl_alloc_memory_of_gpu()
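
kfd_ioctl_alloc_memory_of_gpu() (lines 1042-1168) opens with the same lookup-and-bind preamble as create_queue, then allocates the buffer against this pdd's VM, files it in the per-pdd IDR, and charges it to pdd->vram_usage; the final match at line 1168 is the error path freeing the allocation again. A sketch of the pdd-related middle section; the doorbell flag test, the handle-packing macro, the local 'size', and the labels are assumptions:

	/* Doorbell BOs are carved out of the process doorbell page. */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)		/* flag name assumed */
		offset = kfd_get_process_doorbells(pdd);

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(dev->adev, args->va_addr,
			args->size, pdd->drm_priv, (struct kgd_mem **)&mem,
			&offset, flags, false);			/* trailing args assumed */
	if (err)
		goto err_unlock;				/* label assumed */

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		err = -EFAULT;					/* code assumed */
		goto err_free;					/* label assumed */
	}

	/* The returned handle packs gpu_id and IDR slot; GET_GPU_ID() and
	 * GET_IDR_HANDLE() in the free/map/unmap paths below unpack it. */
	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);	/* macro assumed */

	/* 'size' mirrors args->size (assumed). */
	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
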
1180 struct kfd_process_device *pdd; in kfd_ioctl_free_memory_of_gpu() local
1196 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1197 if (!pdd) { in kfd_ioctl_free_memory_of_gpu()
1204 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1210 ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, in kfd_ioctl_free_memory_of_gpu()
1211 (struct kgd_mem *)mem, pdd->drm_priv, &size); in kfd_ioctl_free_memory_of_gpu()
1218 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1220 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size); in kfd_ioctl_free_memory_of_gpu()
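
kfd_ioctl_free_memory_of_gpu() (lines 1180-1220) is the inverse path: the 64-bit handle from the alloc ioctl is split back into a gpu_id and an IDR slot, the gpu_id selects the pdd, and the freed size is subtracted from pdd->vram_usage. The callees that own the continuation lines 1204 and 1218 are not in the match list, so their names here are assumptions, as are the labels and error codes:

	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		ret = -EINVAL;
		goto err_unlock;				/* label assumed */
	}

	mem = kfd_process_device_translate_handle(		/* callee assumed */
			pdd, GET_IDR_HANDLE(args->handle));
	if (!mem) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
			(struct kgd_mem *)mem, pdd->drm_priv, &size);

	/* Drop the per-pdd IDR entry and undo the VRAM accounting from alloc. */
	kfd_process_device_remove_obj_handle(			/* callee assumed */
			pdd, GET_IDR_HANDLE(args->handle));

	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
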
1232 struct kfd_process_device *pdd, *peer_pdd; in kfd_ioctl_map_memory_to_gpu() local
1262 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1263 if (!pdd) { in kfd_ioctl_map_memory_to_gpu()
1267 dev = pdd->dev; in kfd_ioctl_map_memory_to_gpu()
1269 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_map_memory_to_gpu()
1270 if (IS_ERR(pdd)) { in kfd_ioctl_map_memory_to_gpu()
1271 err = PTR_ERR(pdd); in kfd_ioctl_map_memory_to_gpu()
1275 mem = kfd_process_device_translate_handle(pdd, in kfd_ioctl_map_memory_to_gpu()
1350 struct kfd_process_device *pdd, *peer_pdd; in kfd_ioctl_unmap_memory_from_gpu() local
1378 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1379 if (!pdd) { in kfd_ioctl_unmap_memory_from_gpu()
1384 mem = kfd_process_device_translate_handle(pdd, in kfd_ioctl_unmap_memory_from_gpu()
1408 if (kfd_flush_tlb_after_unmap(pdd->dev)) { in kfd_ioctl_unmap_memory_from_gpu()
1409 err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev, in kfd_ioctl_unmap_memory_from_gpu()
1536 struct kfd_process_device *pdd; in kfd_ioctl_import_dmabuf() local
1548 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_import_dmabuf()
1549 if (!pdd) { in kfd_ioctl_import_dmabuf()
1554 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_import_dmabuf()
1555 if (IS_ERR(pdd)) { in kfd_ioctl_import_dmabuf()
1556 r = PTR_ERR(pdd); in kfd_ioctl_import_dmabuf()
1560 r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf, in kfd_ioctl_import_dmabuf()
1561 args->va_addr, pdd->drm_priv, in kfd_ioctl_import_dmabuf()
1567 idr_handle = kfd_process_device_create_obj_handle(pdd, mem); in kfd_ioctl_import_dmabuf()
1581 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem, in kfd_ioctl_import_dmabuf()
1582 pdd->drm_priv, NULL); in kfd_ioctl_import_dmabuf()
1594 struct kfd_process_device *pdd; in kfd_ioctl_smi_events() local
1598 pdd = kfd_process_device_data_by_id(p, args->gpuid); in kfd_ioctl_smi_events()
1600 if (!pdd) in kfd_ioctl_smi_events()
1603 return kfd_smi_event_open(pdd->dev, &args->anon_fd); in kfd_ioctl_smi_events()
1722 struct kfd_process_device *pdd = p->pdds[i]; in criu_checkpoint_devices() local
1724 device_buckets[i].user_gpu_id = pdd->user_gpu_id; in criu_checkpoint_devices()
1725 device_buckets[i].actual_gpu_id = pdd->dev->id; in criu_checkpoint_devices()
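
criu_checkpoint_devices() (lines 1722-1725) records, for every pdd, both the gpu_id the checkpointed process was using (pdd->user_gpu_id) and the id of the device it actually sat on (pdd->dev->id). criu_restore_devices() at line 2152 later writes the saved user_gpu_id back into the freshly looked-up pdd, so gpu_ids and packed handles in the restored process keep their old values even if the physical device differs. A sketch of the checkpoint loop, with the loop bound assumed:

	for (i = 0; i < p->n_pdds; i++) {		/* bound assumed */
		struct kfd_process_device *pdd = p->pdds[i];

		device_buckets[i].user_gpu_id   = pdd->user_gpu_id;
		device_buckets[i].actual_gpu_id = pdd->dev->id;
		/* further per-device state is captured here in the full function */
	}
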
1762 struct kfd_process_device *pdd = p->pdds[i]; in get_process_num_bos() local
1766 idr_for_each_entry(&pdd->alloc_idr, mem, id) { in get_process_num_bos()
1769 if ((uint64_t)kgd_mem->va > pdd->gpuvm_base) in get_process_num_bos()
1825 struct kfd_process_device *pdd = p->pdds[pdd_index]; in criu_checkpoint_bos() local
1829 idr_for_each_entry(&pdd->alloc_idr, mem, id) { in criu_checkpoint_bos()
1842 if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base) in criu_checkpoint_bos()
1848 bo_bucket->gpu_id = pdd->user_gpu_id; in criu_checkpoint_bos()
1876 KFD_MMAP_GPU_ID(pdd->dev->id); in criu_checkpoint_bos()
1880 KFD_MMAP_GPU_ID(pdd->dev->id); in criu_checkpoint_bos()
2126 struct kfd_process_device *pdd; in criu_restore_devices() local
2145 pdd = kfd_get_process_device_data(dev, p); in criu_restore_devices()
2146 if (!pdd) { in criu_restore_devices()
2152 pdd->user_gpu_id = device_buckets[i].user_gpu_id; in criu_restore_devices()
2162 if (pdd->drm_file) { in criu_restore_devices()
2168 if (kfd_process_device_init_vm(pdd, drm_file)) { in criu_restore_devices()
2180 pdd = kfd_bind_process_to_device(dev, p); in criu_restore_devices()
2181 if (IS_ERR(pdd)) { in criu_restore_devices()
2182 ret = PTR_ERR(pdd); in criu_restore_devices()
2186 if (!pdd->doorbell_index && in criu_restore_devices()
2187 kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) { in criu_restore_devices()
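
criu_restore_devices() (lines 2126-2187) rebuilds each pdd on the restore side: kfd_get_process_device_data() fetches the pdd for the resolved device, the saved user_gpu_id is written back, the GPUVM is acquired from the restored DRM file, and the bind + doorbell allocation familiar from the ioctl paths is repeated. The bodies of the two if statements at lines 2162 and 2168 and all labels/error codes below are assumptions:

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		ret = -EINVAL;
		goto exit;				/* label assumed */
	}
	pdd->user_gpu_id = device_buckets[i].user_gpu_id;

	if (pdd->drm_file) {
		ret = -EINVAL;				/* assumed: a VM is already attached */
		goto exit;
	}

	/* Acquire the GPUVM from the restored DRM file. */
	if (kfd_process_device_init_vm(pdd, drm_file)) {
		ret = -EINVAL;
		goto exit;
	}

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		ret = PTR_ERR(pdd);
		goto exit;
	}

	if (!pdd->doorbell_index &&
	    kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
		ret = -ENOMEM;
		goto exit;
	}
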
2204 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, in criu_restore_memory_of_gpu() argument
2215 if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev)) in criu_restore_memory_of_gpu()
2218 offset = kfd_get_process_doorbells(pdd); in criu_restore_memory_of_gpu()
2227 offset = pdd->dev->adev->rmmio_remap.bus_addr; in criu_restore_memory_of_gpu()
2236 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr, in criu_restore_memory_of_gpu()
2237 bo_bucket->size, pdd->drm_priv, kgd_mem, in criu_restore_memory_of_gpu()
2248 idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle, in criu_restore_memory_of_gpu()
2253 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv, in criu_restore_memory_of_gpu()
2259 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id); in criu_restore_memory_of_gpu()
2261 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id); in criu_restore_memory_of_gpu()
2267 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size); in criu_restore_memory_of_gpu()
2276 struct kfd_process_device *pdd; in criu_restore_bo() local
2285 pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id); in criu_restore_bo()
2286 if (!pdd) { in criu_restore_bo()
2291 ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem); in criu_restore_bo()
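
criu_restore_bo() (lines 2276-2291) ties the two halves together: each saved bo_bucket carries the gpu_id the checkpointed process used, kfd_process_device_data_by_id() resolves it against the restored process (presumably why line 2152 restores pdd->user_gpu_id), and criu_restore_memory_of_gpu() recreates the allocation in that pdd's VM, special-casing doorbell and MMIO buckets as lines 2215-2261 show. Sketch, with error handling assumed:

	pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
	if (!pdd) {
		ret = -ENODEV;				/* code assumed */
		goto exit;				/* label assumed */
	}

	ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
	if (ret)
		goto exit;
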