| /drivers/net/ethernet/mellanox/mlx5/core/steering/sws/ |
| A D | dr_arg.c |
|   28    struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];   member
|   201   arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);   in mlx5dr_arg_get_obj()
|   226   dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);   in mlx5dr_arg_put_obj()
|   245   pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);   in mlx5dr_arg_mgr_create()
|   246   if (!pool_mgr->pools[i])   in mlx5dr_arg_mgr_create()
|   254   dr_arg_pool_destroy(pool_mgr->pools[i]);   in mlx5dr_arg_mgr_create()
|   262   struct dr_arg_pool **pools;   in mlx5dr_arg_mgr_destroy() local
|   268   pools = mgr->pools;   in mlx5dr_arg_mgr_destroy()
|   270   dr_arg_pool_destroy(pools[i]);   in mlx5dr_arg_mgr_destroy()
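The dr_arg references above outline a small pool manager: one pool per log2 chunk-size class, all created up front with rollback on failure, and looked up by directly indexing the array on get/put. The following is a minimal userspace C sketch of that shape; the names (arg_pool, arg_mgr, MAX_LOG_CHUNK) and the calloc/free backing are stand-ins, not the mlx5 implementation.

#include <stdio.h>
#include <stdlib.h>

#define MAX_LOG_CHUNK 8

struct arg_pool { int log_chunk_size; };

struct arg_mgr { struct arg_pool *pools[MAX_LOG_CHUNK]; };

static struct arg_pool *arg_pool_create(int log_chunk_size)
{
    struct arg_pool *pool = calloc(1, sizeof(*pool));

    if (pool)
        pool->log_chunk_size = log_chunk_size;
    return pool;
}

static void arg_pool_destroy(struct arg_pool *pool)
{
    free(pool);
}

static struct arg_mgr *arg_mgr_create(void)
{
    struct arg_mgr *mgr = calloc(1, sizeof(*mgr));
    int i;

    if (!mgr)
        return NULL;

    for (i = 0; i < MAX_LOG_CHUNK; i++) {
        mgr->pools[i] = arg_pool_create(i);
        if (!mgr->pools[i])
            goto rollback;
    }
    return mgr;

rollback:
    /* Destroy only the pools that were successfully created. */
    while (--i >= 0)
        arg_pool_destroy(mgr->pools[i]);
    free(mgr);
    return NULL;
}

static void arg_mgr_destroy(struct arg_mgr *mgr)
{
    int i;

    for (i = 0; i < MAX_LOG_CHUNK; i++)
        arg_pool_destroy(mgr->pools[i]);
    free(mgr);
}

int main(void)
{
    struct arg_mgr *mgr = arg_mgr_create();

    if (!mgr)
        return 1;
    /* A lookup by size class is just an array index. */
    printf("pool for size class 3: %p\n", (void *)mgr->pools[3]);
    arg_mgr_destroy(mgr);
    return 0;
}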
|
| /drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
| A D | pool.c |
|   25    if (!xsk->pools) {   in mlx5e_xsk_get_pools()
|   26    xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,   in mlx5e_xsk_get_pools()
|   27    sizeof(*xsk->pools), GFP_KERNEL);   in mlx5e_xsk_get_pools()
|   28    if (unlikely(!xsk->pools))   in mlx5e_xsk_get_pools()
|   41    kfree(xsk->pools);   in mlx5e_xsk_put_pools()
|   42    xsk->pools = NULL;   in mlx5e_xsk_put_pools()
|   54    xsk->pools[ix] = pool;   in mlx5e_xsk_add_pool()
|   60    xsk->pools[ix] = NULL;   in mlx5e_xsk_remove_pool()
|
| A D | pool.h |
|   12    if (!xsk || !xsk->pools)   in mlx5e_xsk_get_pool()
|   18    return xsk->pools[ix];   in mlx5e_xsk_get_pool()
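Taken together, the xsk pool.c and pool.h references show a per-channel pointer array that is allocated lazily on the first add, freed again when the last pool is removed, and read through a NULL-safe getter. Below is a compact userspace sketch of that pattern; the refcount-based lifetime and the names (xsk_state, MAX_NUM_CHANNELS) are my assumptions for illustration, not the mlx5e code.

#include <stdio.h>
#include <stdlib.h>

#define MAX_NUM_CHANNELS 128

struct xsk_state {
    void **pools;          /* one slot per channel, allocated on first use */
    unsigned int refcnt;   /* number of channels currently holding a pool */
};

/* Allocate the slot array lazily, the first time a pool is added. */
static int xsk_get_pools(struct xsk_state *xsk)
{
    if (!xsk->pools) {
        xsk->pools = calloc(MAX_NUM_CHANNELS, sizeof(*xsk->pools));
        if (!xsk->pools)
            return -1;
    }
    xsk->refcnt++;
    return 0;
}

/* Free the array once the last pool is gone. */
static void xsk_put_pools(struct xsk_state *xsk)
{
    if (--xsk->refcnt == 0) {
        free(xsk->pools);
        xsk->pools = NULL;
    }
}

static int xsk_add_pool(struct xsk_state *xsk, unsigned int ix, void *pool)
{
    if (xsk_get_pools(xsk))
        return -1;
    xsk->pools[ix] = pool;
    return 0;
}

static void xsk_remove_pool(struct xsk_state *xsk, unsigned int ix)
{
    xsk->pools[ix] = NULL;
    xsk_put_pools(xsk);
}

/* NULL-safe lookup, mirroring the pool.h getter listed above. */
static void *xsk_get_pool(struct xsk_state *xsk, unsigned int ix)
{
    if (!xsk || !xsk->pools)
        return NULL;
    return xsk->pools[ix];
}

int main(void)
{
    struct xsk_state xsk = { 0 };
    int dummy;

    xsk_add_pool(&xsk, 3, &dummy);
    printf("pool at 3: %p\n", xsk_get_pool(&xsk, 3));
    xsk_remove_pool(&xsk, 3);
    return 0;
}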
|
| /drivers/net/ethernet/chelsio/libcxgb/ |
| A D | libcxgb_ppm.c |
|   348   struct cxgbi_ppm_pool __percpu *pools;   in ppm_alloc_cpu_pool() local
|   350   unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;   in ppm_alloc_cpu_pool()
|   367   alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;   in ppm_alloc_cpu_pool()
|   368   pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));   in ppm_alloc_cpu_pool()
|   370   if (!pools)   in ppm_alloc_cpu_pool()
|   374   struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);   in ppm_alloc_cpu_pool()
|   384   return pools;   in ppm_alloc_cpu_pool()
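The libcxgb references show the allocation size being computed as the fixed pool header plus a bitmap whose word count is only known at runtime, with one such object allocated per CPU via __alloc_percpu. The sketch below shows the same sizing trick using a C flexible array member; it is a plain userspace allocation rather than a per-CPU one, and the struct and function names are hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/*
 * One pool: a small fixed header plus a bitmap whose length is decided
 * at allocation time (flexible array member).
 */
struct ppm_pool {
    unsigned int next;        /* next-free search hint */
    unsigned int max;         /* number of valid bits in bmap[] */
    unsigned long bmap[];
};

static struct ppm_pool *ppm_pool_alloc(unsigned int nr_bits)
{
    unsigned int words = (nr_bits + BITS_PER_LONG - 1) / BITS_PER_LONG;
    size_t alloc_sz = sizeof(struct ppm_pool) + words * sizeof(unsigned long);
    struct ppm_pool *pool = malloc(alloc_sz);

    if (!pool)
        return NULL;
    memset(pool, 0, alloc_sz);
    pool->max = nr_bits;
    return pool;
}

int main(void)
{
    struct ppm_pool *pool = ppm_pool_alloc(1000);

    if (!pool)
        return 1;
    /* Mark index 37 as used. */
    pool->bmap[37 / BITS_PER_LONG] |= 1UL << (37 % BITS_PER_LONG);
    printf("bit 37 set: %lu\n",
           (pool->bmap[37 / BITS_PER_LONG] >> (37 % BITS_PER_LONG)) & 1UL);
    free(pool);
    return 0;
}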
|
| /drivers/net/ethernet/freescale/dpaa2/ |
| A D | dpaa2-xsk.c |
|   162   pools_params->pools[curr_bp].priority_mask |= (1 << j);   in dpaa2_xsk_set_bp_per_qdbin()
|   163   if (!pools_params->pools[curr_bp].priority_mask)   in dpaa2_xsk_set_bp_per_qdbin()
|   166   pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;   in dpaa2_xsk_set_bp_per_qdbin()
|   167   pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;   in dpaa2_xsk_set_bp_per_qdbin()
|   168   pools_params->pools[curr_bp++].backup_pool = 0;   in dpaa2_xsk_set_bp_per_qdbin()
|
| A D | dpni.c |
|   179   cpu_to_le16(cfg->pools[i].dpbp_id);   in dpni_set_pools()
|   181   cfg->pools[i].priority_mask;   in dpni_set_pools()
|   183   cpu_to_le16(cfg->pools[i].buffer_size);   in dpni_set_pools()
|   185   DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);   in dpni_set_pools()
|
| A D | dpsw.c |
|   1158  cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);   in dpsw_ctrl_if_set_pools()
|   1160  cpu_to_le16(cfg->pools[i].buffer_size);   in dpsw_ctrl_if_set_pools()
|   1162  DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i);   in dpsw_ctrl_if_set_pools()
|
| A D | dpsw.h | 210 } pools[DPSW_MAX_DPBP]; member
|
| A D | dpni.h | 119 } pools[DPNI_MAX_DPBP]; member
|
| A D | dpaa2-switch.c |
|   2729  dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;   in dpaa2_switch_setup_dpbp()
|   2730  dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;   in dpaa2_switch_setup_dpbp()
|   2731  dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;   in dpaa2_switch_setup_dpbp()
|
| A D | dpaa2-eth.c |
|   4385  pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;   in dpaa2_eth_bind_dpni()
|   4386  pools_params.pools[0].backup_pool = 0;   in dpaa2_eth_bind_dpni()
|   4387  pools_params.pools[0].buffer_size = priv->rx_buf_size;   in dpaa2_eth_bind_dpni()
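Across the dpaa2 files, the callers (dpaa2-eth, dpaa2-switch, dpaa2-xsk) fill a fixed-size pools[] array in a host-order config struct (dpbp_id, buffer_size, backup_pool, priority_mask), and the dpni/dpsw helpers then pack that into a little-endian firmware command with cpu_to_le16/cpu_to_le32. The sketch below shows only the general fill-then-serialize pattern; the field layout, sizes, and names are invented for illustration and are not the MC command format.

#include <stdint.h>
#include <stdio.h>

#define MAX_DPBP 8

/* Host-order view of one buffer-pool entry, as a caller fills it in. */
struct pool_cfg {
    uint16_t dpbp_id;
    uint16_t buffer_size;
    uint8_t  priority_mask;
    uint8_t  backup_pool;
};

struct pools_cfg {
    uint8_t num_dpbp;
    struct pool_cfg pools[MAX_DPBP];
};

/* Serialize one 16-bit field to little-endian, whatever the host order. */
static void put_le16(uint8_t *dst, uint16_t v)
{
    dst[0] = v & 0xff;
    dst[1] = v >> 8;
}

/* Pack the host-order config into a little-endian command buffer. */
static size_t set_pools(const struct pools_cfg *cfg, uint8_t *buf)
{
    size_t off = 0;
    int i;

    buf[off++] = cfg->num_dpbp;
    for (i = 0; i < cfg->num_dpbp; i++) {
        put_le16(buf + off, cfg->pools[i].dpbp_id);
        off += 2;
        put_le16(buf + off, cfg->pools[i].buffer_size);
        off += 2;
        buf[off++] = cfg->pools[i].priority_mask;
        buf[off++] = cfg->pools[i].backup_pool;
    }
    return off;
}

int main(void)
{
    struct pools_cfg cfg = { .num_dpbp = 1 };
    uint8_t cmd[1 + MAX_DPBP * 6];

    cfg.pools[0].dpbp_id = 42;
    cfg.pools[0].buffer_size = 2048;
    cfg.pools[0].backup_pool = 0;

    printf("packed %zu bytes\n", set_pools(&cfg, cmd));
    return 0;
}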
|
| /drivers/soc/ti/ |
| A D | knav_qmss.h |
|   203   struct list_head pools;   member
|   304   struct list_head pools;   member
|   363   list_for_each_entry(pool, &kdev->pools, list)
|
| A D | knav_qmss_queue.c |
|   817   node = &region->pools;   in knav_pool_create()
|   818   list_for_each_entry(iter, &region->pools, region_inst) {   in knav_pool_create()
|   832   list_add_tail(&pool->list, &kdev->pools);   in knav_pool_create()
|   1034  list_add(&pool->region_inst, &region->pools);   in knav_queue_setup_region()
|   1119  INIT_LIST_HEAD(&region->pools);   in knav_queue_setup_regions()
|   1361  list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)   in knav_queue_free_regions()
|   1808  INIT_LIST_HEAD(&kdev->pools);   in knav_queue_probe()
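The knav references show each pool being linked into two lists at once: the device-wide kdev->pools (through pool->list) and its region's region->pools (through pool->region_inst). The sketch below illustrates that dual-membership idea with plain singly linked lists instead of the kernel's list_head machinery; the types and names are simplified stand-ins.

#include <stdio.h>
#include <stdlib.h>

/*
 * Each pool sits on two lists: the device-wide list (via dev_next) and
 * its region's list (via region_next).
 */
struct knav_pool {
    char name[16];
    struct knav_pool *dev_next;
    struct knav_pool *region_next;
};

struct knav_region {
    struct knav_pool *pools;     /* head of the per-region list */
};

struct knav_device {
    struct knav_pool *pools;     /* head of the device-wide list */
};

static struct knav_pool *pool_create(struct knav_device *kdev,
                                     struct knav_region *region,
                                     const char *name)
{
    struct knav_pool *pool = calloc(1, sizeof(*pool));

    if (!pool)
        return NULL;
    snprintf(pool->name, sizeof(pool->name), "%s", name);

    /* Link the new pool into both owners. */
    pool->dev_next = kdev->pools;
    kdev->pools = pool;
    pool->region_next = region->pools;
    region->pools = pool;
    return pool;
}

int main(void)
{
    struct knav_device kdev = { 0 };
    struct knav_region region = { 0 };
    struct knav_pool *p;

    pool_create(&kdev, &region, "pool-a");
    pool_create(&kdev, &region, "pool-b");

    for (p = region.pools; p; p = p->region_next)
        printf("region pool: %s\n", p->name);
    for (p = kdev.pools; p; p = p->dev_next)
        printf("device pool: %s\n", p->name);

    /* Tear down via the device-wide list. */
    while (kdev.pools) {
        p = kdev.pools;
        kdev.pools = p->dev_next;
        free(p);
    }
    return 0;
}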
|
| /drivers/net/ethernet/wangxun/libwx/ |
| A D | wx_hw.c |
|   772   wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);   in wx_set_rar()
|   775   wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);   in wx_set_rar()
|   950   wx->mac_table[i].pools,   in wx_sync_mac_table()
|   968   wx->mac_table[i].pools,   in wx_full_sync_mac_table()
|   981   wx->mac_table[0].pools = BIT(VMDQ_P(0));   in wx_mac_set_default_filter()
|   984   wx->mac_table[0].pools,   in wx_mac_set_default_filter()
|   1000  wx->mac_table[i].pools = 0;   in wx_flush_sw_mac_table()
|   1018  wx->mac_table[i].pools |= (1ULL << pool);   in wx_add_mac_filter()
|   1030  wx->mac_table[i].pools |= (1ULL << pool);   in wx_add_mac_filter()
|   1050  wx->mac_table[i].pools &= ~(1ULL << pool);   in wx_del_mac_filter()
|   [all …]
|
| A D | wx_type.h | 905 u64 pools; member
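In libwx, each software MAC-table entry carries a u64 pools bitmap with one bit per pool/VF: the add/del filter paths set and clear bits with 1ULL << pool, and the sync path programs the low and high 32-bit halves into the _L/_H registers. The sketch below mirrors that bit handling in plain C; printf stands in for the register writes, and the struct and function names are stand-ins rather than the libwx API.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* One MAC filter entry with its 64-bit pool membership bitmap. */
struct mac_entry {
    uint64_t pools;
};

static void mac_add_pool(struct mac_entry *e, unsigned int pool)
{
    e->pools |= 1ULL << pool;
}

static void mac_del_pool(struct mac_entry *e, unsigned int pool)
{
    e->pools &= ~(1ULL << pool);
}

/* Split the bitmap into the two 32-bit halves a _L/_H register pair takes. */
static void mac_sync(const struct mac_entry *e)
{
    printf("VM_L = 0x%08" PRIx32 "\n", (uint32_t)(e->pools & 0xFFFFFFFF));
    printf("VM_H = 0x%08" PRIx32 "\n", (uint32_t)(e->pools >> 32));
}

int main(void)
{
    struct mac_entry e = { 0 };

    mac_add_pool(&e, 0);
    mac_add_pool(&e, 37);
    mac_del_pool(&e, 0);
    mac_sync(&e);   /* only bit 37, in the high half, remains set */
    return 0;
}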
|
| /drivers/nvme/host/ |
| A D | pci.c |
|   437   if (pools->small)   in nvme_setup_descriptor_pools()
|   442   if (!pools->large)   in nvme_setup_descriptor_pools()
|   450   if (!pools->small) {   in nvme_setup_descriptor_pools()
|   451   dma_pool_destroy(pools->large);   in nvme_setup_descriptor_pools()
|   452   pools->large = NULL;   in nvme_setup_descriptor_pools()
|   456   return pools;   in nvme_setup_descriptor_pools()
|   466   dma_pool_destroy(pools->large);   in nvme_release_descriptor_pools()
|   467   dma_pool_destroy(pools->small);   in nvme_release_descriptor_pools()
|   482   if (IS_ERR(pools))   in nvme_init_hctx_common()
|   483   return PTR_ERR(pools);   in nvme_init_hctx_common()
|   [all …]
|
| /drivers/md/ |
| A D | dm-table.c |
|   1058  struct dm_md_mempools *pools;   in dm_table_alloc_md_mempools() local
|   1066  pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);   in dm_table_alloc_md_mempools()
|   1067  if (!pools)   in dm_table_alloc_md_mempools()
|   1091  if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))   in dm_table_alloc_md_mempools()
|   1094  if (bioset_init(&pools->bs, pool_size, front_pad, 0))   in dm_table_alloc_md_mempools()
|   1097  t->mempools = pools;   in dm_table_alloc_md_mempools()
|   1101  dm_free_md_mempools(pools);   in dm_table_alloc_md_mempools()
|
| A D | dm.h | 226 void dm_free_md_mempools(struct dm_md_mempools *pools);
|
| A D | dm.c |
|   3359  void dm_free_md_mempools(struct dm_md_mempools *pools)   in dm_free_md_mempools() argument
|   3361  if (!pools)   in dm_free_md_mempools()
|   3364  bioset_exit(&pools->bs);   in dm_free_md_mempools()
|   3365  bioset_exit(&pools->io_bs);   in dm_free_md_mempools()
|   3367  kfree(pools);   in dm_free_md_mempools()
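The dm-table.c and dm.c references together show a common DM idiom: the free helper accepts NULL and tolerates partially initialized members, which lets the allocation path bail out through a single error label by calling the same helper it would use on normal teardown. A minimal userspace sketch of that idiom follows; the names and the malloc/free stand-ins for the biosets are hypothetical.

#include <stdlib.h>

/* Stand-ins for the two biosets held by the mempools object. */
struct md_mempools {
    void *bs;
    void *io_bs;
};

/* NULL-tolerant free that copes with partially initialized state. */
static void free_md_mempools(struct md_mempools *pools)
{
    if (!pools)
        return;

    free(pools->bs);
    free(pools->io_bs);
    free(pools);
}

static struct md_mempools *alloc_md_mempools(size_t pool_size)
{
    struct md_mempools *pools = calloc(1, sizeof(*pools));

    if (!pools)
        return NULL;

    pools->io_bs = malloc(pool_size);
    if (!pools->io_bs)
        goto out_free;
    pools->bs = malloc(pool_size);
    if (!pools->bs)
        goto out_free;

    return pools;

out_free:
    /* Works even though only some members were set up. */
    free_md_mempools(pools);
    return NULL;
}

int main(void)
{
    struct md_mempools *pools = alloc_md_mempools(4096);

    free_md_mempools(pools);
    return 0;
}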
|
| A D | dm-thin.c |
|   526   struct list_head pools;   member
|   532   INIT_LIST_HEAD(&dm_thin_pool_table.pools);   in pool_table_init()
|   543   list_add(&pool->list, &dm_thin_pool_table.pools);   in __pool_table_insert()
|   558   list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {   in __pool_table_lookup()
|   574   list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {   in __pool_table_lookup_metadata_dev()
|
| /drivers/soc/fsl/qbman/ |
| A D | qman_priv.h | 177 u32 pools; member
|
| A D | qman_portal.c | 249 pcfg->pools = qm_get_pools_sdqcr(); in qman_portal_probe()
|
| A D | qman.c |
|   1763  void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)   in qman_p_static_dequeue_add() argument
|   1768  pools &= p->config->pools;   in qman_p_static_dequeue_add()
|   1769  p->sdqcr |= pools;   in qman_p_static_dequeue_add()
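The qman references show the requested pool-channel mask being clamped to the pools the portal was configured with (qman_portal.c line 249) before being OR'd into the portal's SDQCR shadow. Below is a tiny C sketch of that masking step; the struct layout and names are simplified stand-ins, not the qman API.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct portal {
    uint32_t config_pools;  /* pools this portal may dequeue from */
    uint32_t sdqcr;         /* static dequeue command register shadow */
};

static void static_dequeue_add(struct portal *p, uint32_t pools)
{
    pools &= p->config_pools;  /* never enable a pool the portal lacks */
    p->sdqcr |= pools;
}

int main(void)
{
    struct portal p = { .config_pools = 0x0000000f, .sdqcr = 0 };

    static_dequeue_add(&p, 0x13);                     /* bit 4 is filtered out */
    printf("sdqcr = 0x%08" PRIx32 "\n", p.sdqcr);     /* prints 0x00000003 */
    return 0;
}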
|
| /drivers/net/ethernet/mellanox/mlx5/core/ |
| A D | en.h | 858 struct xsk_buff_pool **pools; member
|
| /drivers/message/fusion/lsi/ |
| A D | mpi_history.txt | 309 * Added generic defines for hot spare pools and RAID
|