/drivers/net/ipa/

ipa_mem.c
      38  if (mem->id == mem_id)  in ipa_mem_find()
     131  switch (mem_id) {  in ipa_mem_id_valid()
     190  switch (mem_id) {  in ipa_mem_id_required()
     222  enum ipa_mem_id mem_id = mem->id;  in ipa_mem_valid_one() (local)
     227  if (!ipa_mem_id_valid(ipa, mem_id)) {  in ipa_mem_valid_one()
     238  size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;  in ipa_mem_valid_one()
     241  mem_id, size_multiple);  in ipa_mem_valid_one()
     246  mem_id, mem->canary_count);  in ipa_mem_valid_one()
     260  enum ipa_mem_id mem_id;  in ipa_mem_valid() (local)
     284  if (ipa_mem_id_required(ipa, mem_id))  in ipa_mem_valid()
    [all …]

ipa_table.c
     147  enum ipa_mem_id mem_id;  in ipa_table_mem() (local)
     149  mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED  in ipa_table_mem()
     158  return ipa_mem_find(ipa, mem_id);  in ipa_table_mem()

ipa_mem.h
      98  const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id);
|
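The ipa matches show two recurring moves: ipa_mem_find() linearly scans the configured memory-region table for an entry whose id matches, and ipa_table_mem() picks the region ID with a nested conditional over the filter/hashed/ipv6 flags. Below is a minimal standalone sketch of that pattern; the enum values, table contents, and struct fields are illustrative stand-ins, not the driver's real definitions.

```c
/* Standalone sketch of the IPA lookup pattern: a linear scan for a region
 * ID, plus the nested-conditional ID selection from ipa_table_mem().
 * Offsets and sizes below are made up for illustration. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum ipa_mem_id {
	IPA_MEM_V4_FILTER,
	IPA_MEM_V4_FILTER_HASHED,
	IPA_MEM_V6_FILTER,
	IPA_MEM_V6_FILTER_HASHED,
	IPA_MEM_V4_ROUTE,
	IPA_MEM_V4_ROUTE_HASHED,
	IPA_MEM_V6_ROUTE,
	IPA_MEM_V6_ROUTE_HASHED,
};

struct ipa_mem {
	enum ipa_mem_id id;
	unsigned int offset;
	unsigned int size;
};

static const struct ipa_mem mem_table[] = {
	{ IPA_MEM_V4_FILTER,        0x0100, 0x28 },	/* hypothetical layout */
	{ IPA_MEM_V6_FILTER_HASHED, 0x0200, 0x28 },
	{ IPA_MEM_V4_ROUTE,         0x0300, 0x48 },
};

/* Linear scan, as in ipa_mem_find(): NULL when the ID is not configured */
static const struct ipa_mem *mem_find(enum ipa_mem_id mem_id)
{
	for (size_t i = 0; i < sizeof(mem_table) / sizeof(mem_table[0]); i++)
		if (mem_table[i].id == mem_id)
			return &mem_table[i];
	return NULL;
}

/* Nested conditional selection, as in ipa_table_mem() */
static const struct ipa_mem *table_mem(bool filter, bool hashed, bool ipv6)
{
	enum ipa_mem_id mem_id;

	mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED
					: IPA_MEM_V4_FILTER_HASHED
				 : ipv6 ? IPA_MEM_V6_FILTER
					: IPA_MEM_V4_FILTER
			: hashed ? ipv6 ? IPA_MEM_V6_ROUTE_HASHED
					: IPA_MEM_V4_ROUTE_HASHED
				 : ipv6 ? IPA_MEM_V6_ROUTE
					: IPA_MEM_V4_ROUTE;

	return mem_find(mem_id);
}

int main(void)
{
	const struct ipa_mem *mem = table_mem(true, true, true);

	if (mem)
		printf("region %d at 0x%04x\n", (int)mem->id, mem->offset);
	return 0;
}
```
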
/drivers/accel/habanalabs/common/

memory_mgr.c
     171  buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);  in hl_mmap_mem_buf_alloc()
     329  static void hl_mem_mgr_fini_stats_inc(u64 mem_id, struct hl_mem_mgr_fini_stats *stats)  (argument)
     334  switch (mem_id) {
     360  u64 mem_id;  (local)
     369  mem_id = buf->behavior->mem_id;
     374  hl_mem_mgr_fini_stats_inc(mem_id, stats);

command_buffer.c
     260  .mem_id = HL_MMAP_TYPE_CB,

memory.c
    2149  .mem_id = HL_MMAP_TYPE_TS_BUFF,

habanalabs.h
     931  u64 mem_id;  (member)
|
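In hl_mmap_mem_buf_alloc() an allocator index is ORed with the buffer's type tag (mem_id, e.g. HL_MMAP_TYPE_CB) and shifted by PAGE_SHIFT, giving a handle that can double as an mmap offset. A minimal sketch of that encode/decode round trip follows; the TYPE_* values and bit positions here are invented stand-ins for the real HL_MMAP_TYPE_* masks in habanalabs.h.

```c
/* Sketch of packing a small index plus a type tag into an mmap handle.
 * Tag values and bit positions are hypothetical. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12		/* typical 4 KiB pages */
#define TYPE_CB		(1ULL << 26)	/* hypothetical type tags */
#define TYPE_TS_BUFF	(2ULL << 26)
#define TYPE_MASK	(3ULL << 26)

static uint64_t encode_handle(uint32_t idr_id, uint64_t mem_id)
{
	/* mirrors: buf->handle = (((u64)rc | mem_id) << PAGE_SHIFT) */
	return ((uint64_t)idr_id | mem_id) << PAGE_SHIFT;
}

static uint32_t decode_index(uint64_t handle)
{
	return (uint32_t)((handle >> PAGE_SHIFT) & ~TYPE_MASK);
}

static uint64_t decode_type(uint64_t handle)
{
	return (handle >> PAGE_SHIFT) & TYPE_MASK;
}

int main(void)
{
	uint64_t handle = encode_handle(42, TYPE_CB);

	printf("handle=0x%" PRIx64 " index=%u type_is_cb=%d\n",
	       handle, decode_index(handle), decode_type(handle) == TYPE_CB);
	return 0;
}
```
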
/drivers/soc/ti/

pruss.c
     112  int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,  in pruss_request_mem_region() (argument)
     115  if (!pruss || !region || mem_id >= PRUSS_MEM_MAX)  in pruss_request_mem_region()
     120  if (pruss->mem_in_use[mem_id]) {  in pruss_request_mem_region()
     125  *region = pruss->mem_regions[mem_id];  in pruss_request_mem_region()
     126  pruss->mem_in_use[mem_id] = region;  in pruss_request_mem_region()
|
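pruss_request_mem_region() is a claim-by-index API: validate mem_id against PRUSS_MEM_MAX, refuse a region already marked in mem_in_use[], otherwise hand the caller a copy of the region descriptor and record the claimant. A single-threaded userspace model, with the driver's locking and the region contents omitted:

```c
/* Minimal model of claim-by-index memory region handout. */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

enum pruss_mem { PRUSS_MEM_DRAM0, PRUSS_MEM_DRAM1, PRUSS_MEM_SHRD_RAM2, PRUSS_MEM_MAX };

struct pruss_mem_region {
	void *va;
	size_t size;
};

struct pruss {
	struct pruss_mem_region mem_regions[PRUSS_MEM_MAX];
	struct pruss_mem_region *mem_in_use[PRUSS_MEM_MAX];
};

static int request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
			      struct pruss_mem_region *region)
{
	if (!pruss || !region || mem_id >= PRUSS_MEM_MAX)
		return -EINVAL;

	if (pruss->mem_in_use[mem_id])
		return -EBUSY;		/* already handed out */

	*region = pruss->mem_regions[mem_id];
	pruss->mem_in_use[mem_id] = region;	/* remember the claimant */
	return 0;
}

int main(void)
{
	struct pruss pruss = { .mem_regions[PRUSS_MEM_DRAM0] = { NULL, 8192 } };
	struct pruss_mem_region region;

	printf("first request:  %d\n", request_mem_region(&pruss, PRUSS_MEM_DRAM0, &region));
	printf("second request: %d\n", request_mem_region(&pruss, PRUSS_MEM_DRAM0, &region));
	return 0;
}
```
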
/drivers/gpu/drm/amd/amdgpu/

aqua_vanjaram.c
     443  int xcc_id, uint8_t *mem_id)  in __aqua_vanjaram_get_xcp_mem_id() (argument)
     446  *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;  in __aqua_vanjaram_get_xcp_mem_id()
     447  *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;  in __aqua_vanjaram_get_xcp_mem_id()
     453  struct amdgpu_xcp *xcp, uint8_t *mem_id)  in aqua_vanjaram_get_xcp_mem_id() (argument)
     470  *mem_id = 0;  in aqua_vanjaram_get_xcp_mem_id()
     480  return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);  in aqua_vanjaram_get_xcp_mem_id()
     490  *mem_id = i;  in aqua_vanjaram_get_xcp_mem_id()

amdgpu_xcp.h
     104  uint8_t mem_id;  (member)
     141  struct amdgpu_xcp *xcp, uint8_t *mem_id);

amdgpu_xcp.c
     127  uint8_t mem_id;  in amdgpu_xcp_init() (local)
     155  xcp_mgr, &xcp_mgr->xcp[i], &mem_id);  in amdgpu_xcp_init()
     159  xcp_mgr->xcp[i].mem_id = mem_id;  in amdgpu_xcp_init()
     439  fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :  in amdgpu_xcp_open_device()
     440  adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;  in amdgpu_xcp_open_device()

amdgpu_amdkfd.c
     779  s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);  in amdgpu_amdkfd_xcp_memory_size() (local)
     782  if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {  in amdgpu_amdkfd_xcp_memory_size()
     794  tmp = adev->gmc.mem_partitions[mem_id].size;  in amdgpu_amdkfd_xcp_memory_size()

amdgpu_object.c
     120  int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);  in amdgpu_bo_placement_from_domain() (local)
     122  if (adev->gmc.mem_partitions && mem_id >= 0) {  in amdgpu_bo_placement_from_domain()
     123  places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;  in amdgpu_bo_placement_from_domain()
     128  places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;  in amdgpu_bo_placement_from_domain()

gmc_v9_0.c
    1200  KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);  in gmc_v9_0_get_coherence_flags()
    1297  if (adev->gmc.is_app_apu && vm->mem_id >= 0) {  in gmc_v9_0_override_vm_pte_flags()
    1298  local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;  in gmc_v9_0_override_vm_pte_flags()
    1313  vm->mem_id, local_node, nid);  in gmc_v9_0_override_vm_pte_flags()

amdgpu_amdkfd.h
     359  (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)

amdgpu_vm.h
     426  int8_t mem_id;  (member)
|
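__aqua_vanjaram_get_xcp_mem_id() reduces a compute-core (XCC) index to a memory-partition index with two integer divisions: first to its XCP, then to the memory partition backing that XCP. amdgpu_xcp_open_device() additionally maps an unassigned client (AMDGPU_XCP_NO_PARTITION) to mem_id -1. A sketch of that arithmetic with made-up partition counts; the real values come from the device configuration:

```c
/* Sketch of mapping compute-core indices to memory-partition indices
 * by repeated integer division. Counts are example values. */
#include <stdint.h>
#include <stdio.h>

#define NO_PARTITION	(-1)

struct xcp_config {
	unsigned int num_xcc_per_xcp;		/* XCCs per compute partition */
	unsigned int num_xcp_per_mem_partition;	/* XCPs per memory partition */
};

static uint8_t xcc_to_mem_id(const struct xcp_config *cfg, unsigned int xcc_id)
{
	uint8_t mem_id = xcc_id / cfg->num_xcc_per_xcp;

	mem_id /= cfg->num_xcp_per_mem_partition;
	return mem_id;
}

/* Mirrors the fallback in amdgpu_xcp_open_device(): a client with no
 * assigned partition gets -1 rather than a bogus index. */
static int8_t client_mem_id(const struct xcp_config *cfg, int xcp_id)
{
	return xcp_id == NO_PARTITION ? -1
				      : (int8_t)(xcp_id / cfg->num_xcp_per_mem_partition);
}

int main(void)
{
	struct xcp_config cfg = { .num_xcc_per_xcp = 2, .num_xcp_per_mem_partition = 2 };

	for (unsigned int xcc = 0; xcc < 8; xcc++)
		printf("xcc %u -> mem_id %u\n", xcc, (unsigned)xcc_to_mem_id(&cfg, xcc));
	printf("unassigned client -> %d\n", client_mem_id(&cfg, NO_PARTITION));
	return 0;
}
```

The mem_id then feeds placement and coherence decisions, as in amdgpu_bo_placement_from_domain(), which clamps a buffer's pfn range to mem_partitions[mem_id], and gmc_v9_0, which compares a buffer's partition against the VM's.
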
/drivers/net/ethernet/chelsio/cxgb3/

cxgb3_ioctl.h
      78  uint32_t mem_id;  (member)

cxgb3_offload.c
     315  if (t->mem_id == MEM_CM)  in cxgb_rdma_ctl()
     317  else if (t->mem_id == MEM_PMRX)  in cxgb_rdma_ctl()
     319  else if (t->mem_id == MEM_PMTX)  in cxgb_rdma_ctl()

cxgb3_main.c
    2460  if (t.mem_id == MEM_CM)  in cxgb_siocdevprivate()
    2462  else if (t.mem_id == MEM_PMRX)  in cxgb_siocdevprivate()
    2464  else if (t.mem_id == MEM_PMTX)  in cxgb_siocdevprivate()
|
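Both cxgb_rdma_ctl() and cxgb_siocdevprivate() dispatch on the ioctl's mem_id to pick one of three on-chip memories (MEM_CM, MEM_PMRX, MEM_PMTX) and reject anything else. A sketch of that selection; the mc7 struct here is a bare stand-in for the adapter's real memory-controller state:

```c
/* Sketch of the three-way mem_id dispatch used by the cxgb3 ioctls. */
#include <stdint.h>
#include <stdio.h>

enum { MEM_CM, MEM_PMRX, MEM_PMTX };

struct mc7 { const char *name; };	/* stand-in for the controller state */

struct adapter {
	struct mc7 cm, pmrx, pmtx;
};

static struct mc7 *mem_select(struct adapter *adap, uint32_t mem_id)
{
	if (mem_id == MEM_CM)
		return &adap->cm;
	else if (mem_id == MEM_PMRX)
		return &adap->pmrx;
	else if (mem_id == MEM_PMTX)
		return &adap->pmtx;
	return NULL;			/* caller maps this to -EINVAL */
}

int main(void)
{
	struct adapter adap = {
		.cm = { "CM" }, .pmrx = { "PMRX" }, .pmtx = { "PMTX" },
	};
	struct mc7 *mem = mem_select(&adap, MEM_PMRX);

	printf("%s\n", mem ? mem->name : "invalid");
	return 0;
}
```
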
/drivers/infiniband/ulp/rtrs/

rtrs-clt.h
      87  unsigned int mem_id;  (member)

rtrs-clt.c
      89  WARN_ON(permit->mem_id != bit);  in __rtrs_get_permit()
      99  clear_bit_unlock(permit->mem_id, clt->permits_map);  in __rtrs_put_permit()
     154  if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))  in rtrs_clt_put_permit()
     992  req = &clt_path->reqs[permit->mem_id];  in rtrs_clt_get_req()
    1008  req = &alive_path->reqs[fail_req->permit->mem_id];  in rtrs_clt_get_copy_req()
    1117  buf_id = req->permit->mem_id;  in rtrs_clt_write_req()
    1247  buf_id = req->permit->mem_id;  in rtrs_clt_read_req()
    1426  permit->mem_id = i;  in alloc_permits()
|
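In RTRS, permit->mem_id is the permit's slot number: it is fixed at pool creation in alloc_permits(), doubles as the bit index into permits_map, and selects the matching entry in the per-path reqs[] array. A single-threaded userspace model follows; the kernel version uses atomic bit operations (test_bit, clear_bit_unlock) on the bitmap, which this sketch omits:

```c
/* Model of a permit pool where each permit's mem_id is both its bitmap
 * bit and its index into a parallel request array. */
#include <stdbool.h>
#include <stdio.h>

#define NR_PERMITS 8

struct permit {
	unsigned int mem_id;	/* slot index, fixed at pool creation */
};

struct permit_pool {
	bool map[NR_PERMITS];		/* stands in for permits_map */
	struct permit permits[NR_PERMITS];
};

static void pool_init(struct permit_pool *p)
{
	for (unsigned int i = 0; i < NR_PERMITS; i++)
		p->permits[i].mem_id = i;	/* as in alloc_permits() */
}

static struct permit *get_permit(struct permit_pool *p)
{
	for (unsigned int bit = 0; bit < NR_PERMITS; bit++) {
		if (!p->map[bit]) {
			p->map[bit] = true;
			return &p->permits[bit];	/* mem_id == bit */
		}
	}
	return NULL;			/* pool exhausted */
}

static void put_permit(struct permit_pool *p, struct permit *permit)
{
	p->map[permit->mem_id] = false;
}

int main(void)
{
	struct permit_pool pool = { 0 };

	pool_init(&pool);
	struct permit *a = get_permit(&pool);
	struct permit *b = get_permit(&pool);

	printf("a=%u b=%u\n", a->mem_id, b->mem_id);
	put_permit(&pool, a);
	printf("reused=%u\n", get_permit(&pool)->mem_id);
	return 0;
}
```
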
/drivers/gpu/drm/amd/amdkfd/

kfd_device.c
     872  node->node_id, node->xcp->mem_id,  in kgd2kfd_device_init()

kfd_svm.c
    1255  (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))  in svm_range_get_pte_flags()
|
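svm_range_get_pte_flags() treats a buffer as local when either node lacks an XCP or both nodes sit on the same memory partition (equal mem_id). A tiny sketch of just that predicate, with the device-specific PTE-flag consequences left out:

```c
/* Sketch of the same-partition locality check from kfd_svm.c. */
#include <stdbool.h>
#include <stdio.h>

struct xcp { unsigned char mem_id; };
struct node { const struct xcp *xcp; };

static bool same_mem_partition(const struct node *bo_node, const struct node *node)
{
	return !bo_node->xcp || !node->xcp ||
	       bo_node->xcp->mem_id == node->xcp->mem_id;
}

int main(void)
{
	struct xcp p0 = { 0 }, p1 = { 1 };
	struct node a = { &p0 }, b = { &p1 }, c = { &p0 }, none = { NULL };

	printf("%d %d %d\n", same_mem_partition(&a, &b),
	       same_mem_partition(&a, &c), same_mem_partition(&a, &none));
	return 0;
}
```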