/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_xcp.c
      69  if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)            in amdgpu_xcp_run_transition()
      72  xcp = &xcp_mgr->xcp[xcp_id];                                     in amdgpu_xcp_run_transition()
      85  return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,                in amdgpu_xcp_prepare_suspend()
      89  int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)   in amdgpu_xcp_suspend() (argument)
      96  return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,                in amdgpu_xcp_prepare_resume()
     100  int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)    in amdgpu_xcp_resume() (argument)
     113  xcp = &xcp_mgr->xcp[xcp_id];                                     in __amdgpu_xcp_add_block()
     397  fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;                         in amdgpu_xcp_open_device()
     410  fpriv->xcp_id = i;                                               in amdgpu_xcp_open_device()
     416  adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;                        in amdgpu_xcp_open_device()
    [all …]
|
amdgpu_xcp.h
     106  int (*get_ip_details)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
     112  int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
     113  int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
     114  int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
     115  int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
     122  int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
     123  int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
     124  int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
     125  int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
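
Together with the amdgpu_xcp.c hits above, these declarations show the driver's ops-table pattern: the exported amdgpu_xcp_suspend()/amdgpu_xcp_resume() wrappers validate xcp_id against MAX_XCP and the partition's valid flag, then dispatch through per-ASIC function pointers via amdgpu_xcp_run_transition(). Below is a minimal standalone sketch of that shape; every type and name in it is an illustrative stand-in, not the real driver API (the real transition runner takes a target state, not a callback).

    #include <errno.h>
    #include <stdio.h>

    #define MAX_XCP 8

    struct xcp_mgr;

    /* Per-ASIC callback table, shaped like the pointers declared above. */
    struct xcp_funcs {
            int (*suspend)(struct xcp_mgr *mgr, int xcp_id);
            int (*resume)(struct xcp_mgr *mgr, int xcp_id);
    };

    struct xcp_mgr {
            struct { int valid; } xcp[MAX_XCP];
            const struct xcp_funcs *funcs;
    };

    /* Validate the partition id before dispatching (cf. line 69 above). */
    static int xcp_run_transition(struct xcp_mgr *mgr, int xcp_id,
                                  int (*op)(struct xcp_mgr *, int))
    {
            if (xcp_id < 0 || xcp_id >= MAX_XCP || !mgr->xcp[xcp_id].valid)
                    return -EINVAL;
            return op ? op(mgr, xcp_id) : -ENOSYS;
    }

    static int demo_suspend(struct xcp_mgr *mgr, int xcp_id)
    {
            (void)mgr;
            printf("suspending partition %d\n", xcp_id);
            return 0;
    }

    int main(void)
    {
            static const struct xcp_funcs funcs = { .suspend = demo_suspend };
            struct xcp_mgr mgr = { .funcs = &funcs };

            mgr.xcp[0].valid = 1;
            printf("xcp 0: %d\n", xcp_run_transition(&mgr, 0, funcs.suspend));
            printf("xcp 5: %d\n", xcp_run_transition(&mgr, 5, funcs.suspend));
            return 0;
    }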
|
aqua_vanjaram.c
      73  int xcp_id;                                                      in aqua_vanjaram_set_xcp_id() (local)
      77  ring->xcp_id = AMDGPU_XCP_NO_PARTITION;                          in aqua_vanjaram_set_xcp_id()
      79  adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;            in aqua_vanjaram_set_xcp_id()
     103  for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {   in aqua_vanjaram_set_xcp_id()
     105  ring->xcp_id = xcp_id;                                           in aqua_vanjaram_set_xcp_id()
     107  ring->xcp_id);                                                   in aqua_vanjaram_set_xcp_id()
     109  adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;             in aqua_vanjaram_set_xcp_id()
     192  if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {                  in aqua_vanjaram_select_scheds()
     195  fpriv->xcp_id = 0;                                               in aqua_vanjaram_select_scheds()
     201  fpriv->xcp_id = i;                                               in aqua_vanjaram_select_scheds()
    [all …]
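
aqua_vanjaram_set_xcp_id() tags every ring with its owning partition: with no usable partitioning it stores AMDGPU_XCP_NO_PARTITION, otherwise it walks xcp_id from 0 to num_xcps and assigns each ring to a partition, mirroring the id into adev->gfx.enforce_isolation[]. The selection predicate on lines 103-107 is elided in the listing, so the sketch below simply spreads rings evenly; it illustrates the assignment pattern only, not the real policy.

    #include <stdio.h>

    #define XCP_NO_PARTITION (-1)   /* stand-in for AMDGPU_XCP_NO_PARTITION */

    struct ring { int index; int xcp_id; };

    /* Tag each ring with a partition id; the even split is illustrative only. */
    static void set_xcp_ids(struct ring *rings, int num_rings, int num_xcps)
    {
            int per_xcp, i;

            if (num_xcps <= 1) {
                    /* No partitioning: mirrors line 77 above. */
                    for (i = 0; i < num_rings; i++)
                            rings[i].xcp_id = XCP_NO_PARTITION;
                    return;
            }

            per_xcp = (num_rings + num_xcps - 1) / num_xcps;
            for (i = 0; i < num_rings; i++)
                    rings[i].xcp_id = i / per_xcp;
    }

    int main(void)
    {
            struct ring rings[8];
            int i;

            for (i = 0; i < 8; i++)
                    rings[i].index = i;
            set_xcp_ids(rings, 8, 4);
            for (i = 0; i < 8; i++)
                    printf("ring %d -> xcp %d\n", rings[i].index, rings[i].xcp_id);
            return 0;
    }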
|
amdgpu_amdkfd.h
     256  uint32_t *flags, int8_t *xcp_id);
     308  uint8_t xcp_id);
     354  uint64_t size, u32 alloc_flag, int8_t xcp_id);
     356  uint64_t size, u32 alloc_flag, int8_t xcp_id);
     358  u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id);
     360  #define KFD_XCP_MEM_ID(adev, xcp_id) \                           (argument)
     361          ((adev)->xcp_mgr && (xcp_id) >= 0 ? \
     362          (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)
     364  #define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id))   (argument)
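
KFD_XCP_MEM_ID() is the xcp_id-to-memory-partition translation: it returns the partition's mem_id when a partition manager exists and the id is non-negative, and -1 otherwise. The hits in amdgpu_ttm.c and gmc_v9_0.c below consume it exactly this way. A compilable paraphrase, with the structs reduced to the fields the macro touches:

    #include <stdint.h>
    #include <stdio.h>

    struct amdgpu_xcp { int8_t mem_id; };
    struct amdgpu_xcp_mgr { struct amdgpu_xcp xcp[8]; };
    struct amdgpu_device { struct amdgpu_xcp_mgr *xcp_mgr; };

    /* Same shape as lines 360-362 above: -1 means "no memory partition". */
    #define KFD_XCP_MEM_ID(adev, xcp_id) \
            ((adev)->xcp_mgr && (xcp_id) >= 0 ? \
             (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)

    int main(void)
    {
            struct amdgpu_xcp_mgr mgr = { .xcp = { [2] = { .mem_id = 1 } } };
            struct amdgpu_device partitioned = { .xcp_mgr = &mgr };
            struct amdgpu_device flat = { .xcp_mgr = NULL };

            printf("%d\n", KFD_XCP_MEM_ID(&partitioned, 2));  /* 1  */
            printf("%d\n", KFD_XCP_MEM_ID(&partitioned, -1)); /* -1 */
            printf("%d\n", KFD_XCP_MEM_ID(&flat, 2));         /* -1 */
            return 0;
    }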
|
amdgpu_amdkfd_gpuvm.c
     196  if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))          in amdgpu_amdkfd_reserve_mem_limit()
     223  (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >   in amdgpu_amdkfd_reserve_mem_limit()
     234  if (adev && xcp_id >= 0) {                                       in amdgpu_amdkfd_reserve_mem_limit()
     260  if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))          in amdgpu_amdkfd_unreserve_mem_limit()
     282  WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,     in amdgpu_amdkfd_unreserve_mem_limit()
     300  bo->xcp_id);                                                     in amdgpu_amdkfd_release_notify()
    1675  uint8_t xcp_id)                                                  in amdgpu_amdkfd_get_available_memory() (argument)
    1729  int8_t xcp_id = -1;                                              in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() (local)
    1752  xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?              in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
    1753      0 : fpriv->xcp_id;                                           in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
    [all …]
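
The reserve/unreserve pair maintains a per-partition adev->kfd.vram_used[] counter: a reservation fails when the partition's running total would exceed the budget, and unreservation warns once if the counter ever goes negative (line 282). A minimal userspace model of that accounting, assuming one flat budget per partition (the real check also folds in system-memory and TTM limits):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_XCP 4

    static int64_t vram_used[NUM_XCP];
    static const int64_t vram_limit = 1024;  /* illustrative per-partition budget */

    static int reserve_mem(int xcp_id, int64_t vram_needed)
    {
            if (xcp_id < 0 || xcp_id >= NUM_XCP)
                    return -EINVAL;          /* cf. the WARN_ONCE on line 196 */
            if (vram_used[xcp_id] + vram_needed > vram_limit)
                    return -ENOMEM;          /* over this partition's budget */
            vram_used[xcp_id] += vram_needed;
            return 0;
    }

    static void unreserve_mem(int xcp_id, int64_t size)
    {
            if (xcp_id < 0 || xcp_id >= NUM_XCP)
                    return;
            vram_used[xcp_id] -= size;
            if (vram_used[xcp_id] < 0)       /* cf. the WARN_ONCE on line 282 */
                    fprintf(stderr, "vram_used[%d] underflow\n", xcp_id);
    }

    int main(void)
    {
            printf("reserve 600: %d\n", reserve_mem(1, 600));  /* ok      */
            printf("reserve 600: %d\n", reserve_mem(1, 600));  /* -ENOMEM */
            unreserve_mem(1, 600);
            unreserve_mem(1, 600);  /* drives the counter negative */
            return 0;
    }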
|
amdgpu_amdkfd.c
     506  uint32_t *flags, int8_t *xcp_id)                                 in amdgpu_amdkfd_get_dmabuf_info() (argument)
     550  if (xcp_id)                                                      in amdgpu_amdkfd_get_dmabuf_info()
     551  *xcp_id = bo->xcp_id;                                            in amdgpu_amdkfd_get_dmabuf_info()
     797  u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)   in amdgpu_amdkfd_xcp_memory_size() (argument)
     799  s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);                        in amdgpu_amdkfd_xcp_memory_size()
     802  if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {    in amdgpu_amdkfd_xcp_memory_size()
|
amdgpu_gfx.c
    1444  static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)   in amdgpu_gfx_run_cleaner_shader() (argument)
    1459  if ((ring->xcp_id == xcp_id) && ring->sched.ready) {             in amdgpu_gfx_run_cleaner_shader()
    1777  if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)           in amdgpu_gfx_enforce_isolation_handler()
    1780  idx = isolation_work->xcp_id;                                    in amdgpu_gfx_enforce_isolation_handler()
    1787  if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)      in amdgpu_gfx_enforce_isolation_handler()
    1791  if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)  in amdgpu_gfx_enforce_isolation_handler()
    1817  if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)                     in amdgpu_gfx_enforce_isolation_ring_begin_use()
    1820  idx = ring->xcp_id;                                              in amdgpu_gfx_enforce_isolation_ring_begin_use()
    1841  if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)                     in amdgpu_gfx_enforce_isolation_ring_end_use()
    1844  idx = ring->xcp_id;                                              in amdgpu_gfx_enforce_isolation_ring_end_use()
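
The begin_use/end_use hooks bracket ring activity per partition: both bail out for AMDGPU_XCP_NO_PARTITION and otherwise use ring->xcp_id as an index into per-xcp isolation state. Schematically (the real hooks also take a mutex and arm or cancel delayed work, all omitted here; the refcount below is a stand-in):

    #include <stdio.h>

    #define XCP_NO_PARTITION (-1)   /* stand-in for AMDGPU_XCP_NO_PARTITION */
    #define MAX_XCP 8

    struct ring { int xcp_id; };

    static int isolation_busy[MAX_XCP];  /* stand-in for per-xcp state */

    static void ring_begin_use(struct ring *ring)
    {
            if (ring->xcp_id == XCP_NO_PARTITION)
                    return;  /* isolation is tracked per partition only */
            isolation_busy[ring->xcp_id]++;
    }

    static void ring_end_use(struct ring *ring)
    {
            if (ring->xcp_id == XCP_NO_PARTITION)
                    return;
            isolation_busy[ring->xcp_id]--;
    }

    int main(void)
    {
            struct ring r = { .xcp_id = 2 };

            ring_begin_use(&r);
            printf("xcp 2 busy: %d\n", isolation_busy[2]);
            ring_end_use(&r);
            printf("xcp 2 busy: %d\n", isolation_busy[2]);
            return 0;
    }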
|
amdgpu_vm_pt.c
     440  int32_t xcp_id)                                                  in amdgpu_vm_pt_create() (argument)
     471  bp.xcp_id_plus1 = xcp_id + 1;                                    in amdgpu_vm_pt_create()
     508  vm->root.bo->xcp_id);                                            in amdgpu_vm_pt_alloc()
|
amdgpu_vm.h
     480  int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
     576  int32_t xcp_id);
|
amdgpu_ids.c
     489  (adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?   in amdgpu_vmid_uses_reserved()
     490  vm->root.bo->xcp_id : 0] &&                                      in amdgpu_vmid_uses_reserved()
|
amdgpu_gem.c
     376  flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);      in amdgpu_gem_create_ioctl()
     446  0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);          in amdgpu_gem_userptr_ioctl()
     971  ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);             in amdgpu_mode_dumb_create()
|
amdgpu_object.h
     125  int8_t xcp_id;                                                   (member)
|
amdgpu_object.c
     117  int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);               in amdgpu_bo_placement_from_domain()
     582  bo->xcp_id = bp->xcp_id_plus1 - 1;                               in amdgpu_bo_create()
     585  bo->xcp_id = 0;                                                  in amdgpu_bo_create()
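
amdgpu_bo_create() decodes the +1-biased partition id that callers such as amdgpu_gem.c (fpriv->xcp_id + 1) and amdgpu_vm_pt.c (bp.xcp_id_plus1 = xcp_id + 1) encode: biasing by one lets a zero-initialized param struct mean "no partition requested". The guard between lines 582 and 585 is elided in the listing; the sketch below assumes a zero xcp_id_plus1 selects the fallback id 0.

    #include <stdint.h>
    #include <stdio.h>

    #define XCP_NO_PARTITION (-1)   /* stand-in for AMDGPU_XCP_NO_PARTITION */

    struct bo_param { int8_t xcp_id_plus1; };  /* zero-initialized => unset */
    struct bo { int8_t xcp_id; };

    /* Encode: bias by +1 so "no partition" (-1) lands on the zero default. */
    static void bo_param_set_xcp(struct bo_param *bp, int8_t xcp_id)
    {
            bp->xcp_id_plus1 = xcp_id + 1;
    }

    /* Decode, mirroring lines 582/585: unbias, or fall back to partition 0. */
    static void bo_create(struct bo *bo, const struct bo_param *bp)
    {
            if (bp->xcp_id_plus1)
                    bo->xcp_id = bp->xcp_id_plus1 - 1;
            else
                    bo->xcp_id = 0;
    }

    int main(void)
    {
            struct bo_param bp = { 0 };
            struct bo bo;

            bo_create(&bo, &bp);
            printf("unset: %d\n", bo.xcp_id);           /* 0 */
            bo_param_set_xcp(&bp, 3);
            bo_create(&bo, &bp);
            printf("explicit 3: %d\n", bo.xcp_id);      /* 3 */
            bo_param_set_xcp(&bp, XCP_NO_PARTITION);
            bo_create(&bo, &bp);
            printf("no partition: %d\n", bo.xcp_id);    /* 0 */
            return 0;
    }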
|
amdgpu_gfx.h
     355  u32 xcp_id;                                                      (member)
|
amdgpu_kms.c
     622  fpriv->xcp_id < adev->xcp_mgr->num_xcps) {                       in amdgpu_info_ioctl()
     623  xcp = &adev->xcp_mgr->xcp[fpriv->xcp_id];                        in amdgpu_info_ioctl()
    1337  r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);             in amdgpu_driver_open_kms()
|
amdgpu_ring.h
     263  u32 xcp_id;                                                      (member)
|
amdgpu_ttm.c
    1109  if (adev->gmc.mem_partitions && abo->xcp_id >= 0)                in amdgpu_ttm_tt_create()
    1110  gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);                in amdgpu_ttm_tt_create()
    1112  gtt->pool_id = abo->xcp_id;                                      in amdgpu_ttm_tt_create()
|
amdgpu_vm.c
    2419  int32_t xcp_id)                                                  in amdgpu_vm_init() (argument)
    2473  false, &root, xcp_id);                                           in amdgpu_vm_init()
|
amdgpu.h
     498  uint32_t xcp_id;                                                 (member)
|
amdgpu_cs.c
     299  p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];   in amdgpu_cs_pass1()
|
gmc_v9_0.c
    1200  KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);                 in gmc_v9_0_get_coherence_flags()
|
amdgpu_device.c
    4133  adev->gfx.enforce_isolation[i].xcp_id = i;                       in amdgpu_device_init()
|
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_chardev.c
    1502  int8_t xcp_id;                                                   in kfd_ioctl_get_dmabuf_info() (local)
    1523  &args->metadata_size, &flags, &xcp_id);                          in kfd_ioctl_get_dmabuf_info()
    1527  if (xcp_id >= 0)                                                 in kfd_ioctl_get_dmabuf_info()
    1528  args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;          in kfd_ioctl_get_dmabuf_info()
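
On the KFD side, the ioctl recovers the exporting BO's xcp_id via amdgpu_amdkfd_get_dmabuf_info() and, when it names a real partition, reports the matching per-partition KFD node id back to userspace. Reduced to that control flow (types, ids, and the unpartitioned fallback below are invented for illustration; the listing elides the else branch):

    #include <stdint.h>
    #include <stdio.h>

    struct kfd_node { uint32_t id; };

    /* Hypothetical per-device table of partition-backed KFD nodes. */
    static struct kfd_node nodes[4] = { {10}, {11}, {12}, {13} };

    /* Report the exporting partition's node id; node 0 is an assumed fallback. */
    static uint32_t dmabuf_gpu_id(int8_t xcp_id)
    {
            if (xcp_id >= 0)        /* same guard as line 1527 */
                    return nodes[xcp_id].id;
            return nodes[0].id;     /* unpartitioned BO: fallback assumed here */
    }

    int main(void)
    {
            printf("xcp 2:  gpu_id %u\n", dmabuf_gpu_id(2));
            printf("xcp -1: gpu_id %u\n", dmabuf_gpu_id(-1));
            return 0;
    }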
|