| /net/sunrpc/xprtrdma/ |
| A D | frwr_ops.c | 61 ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,  in frwr_mr_unmap() |
| | | 62 mr->mr_dir);  in frwr_mr_unmap() |
| | | 76 frwr_mr_unmap(mr);  in frwr_mr_release() |
| | | 81 kfree(mr->mr_sg);  in frwr_mr_release() |
| | | 82 kfree(mr);  in frwr_mr_release() |
| | | 92 rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);  in frwr_mr_put() |
| | | 111 frwr_mr_put(mr);  in frwr_reset() |
| | | 146 mr->mr_sg = sg;  in frwr_mr_init() |
| | | 399 mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;  in frwr_send() |
| | | 677 if (!mr)  in frwr_wp_create() |
| | | [all …] |
|
| A D | rpc_rdma.c | 275 xdr_encode_rdma_segment(p, mr->mr_handle, mr->mr_length, mr->mr_offset);  in encode_rdma_segment() |
| | | 290 xdr_encode_read_segment(p, position, mr->mr_handle, mr->mr_length,  in encode_read_segment() |
| | | 291 mr->mr_offset);  in encode_read_segment() |
| | | 302 if (!*mr) {  in rpcrdma_mr_prepare() |
| | | 304 if (!*mr)  in rpcrdma_mr_prepare() |
| | | 306 (*mr)->mr_req = req;  in rpcrdma_mr_prepare() |
| | | 340 struct rpcrdma_mr *mr;  in rpcrdma_encode_read_list() local |
| | | 366 nsegs -= mr->mr_nents;  in rpcrdma_encode_read_list() |
| | | 398 struct rpcrdma_mr *mr;  in rpcrdma_encode_write_list() local |
| | | 432 nsegs -= mr->mr_nents;  in rpcrdma_encode_write_list() |
| | | [all …] |
|
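The rpc_rdma.c matches above marshal each MR as an RDMA segment: a handle, a length, and a 64-bit offset, with a read segment adding a position word in front. Below is a minimal userspace sketch of that encoding, assuming the RFC 8166 wire layout with all fields big-endian; the helper names are illustrative stand-ins, not the kernel's XDR routines.

```c
/* Userspace sketch of RDMA segment encoding, assuming the RFC 8166
 * layout: handle (u32), length (u32), offset (u64), all big-endian;
 * a read segment prepends a 32-bit position. Names are hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() */

static uint8_t *encode_be32(uint8_t *p, uint32_t v)
{
	uint32_t be = htonl(v);

	memcpy(p, &be, 4);
	return p + 4;
}

static uint8_t *encode_be64(uint8_t *p, uint64_t v)
{
	p = encode_be32(p, (uint32_t)(v >> 32));	/* high word first */
	return encode_be32(p, (uint32_t)v);
}

/* Mirrors the shape of xdr_encode_rdma_segment(p, handle, length, offset). */
static uint8_t *encode_rdma_segment(uint8_t *p, uint32_t handle,
				    uint32_t length, uint64_t offset)
{
	p = encode_be32(p, handle);
	p = encode_be32(p, length);
	return encode_be64(p, offset);
}

/* Mirrors xdr_encode_read_segment(): position word, then the segment. */
static uint8_t *encode_read_segment(uint8_t *p, uint32_t position,
				    uint32_t handle, uint32_t length,
				    uint64_t offset)
{
	p = encode_be32(p, position);
	return encode_rdma_segment(p, handle, length, offset);
}
```

Each plain segment is 16 bytes on the wire and each read segment 20, which is why the encoders advance the cursor they return.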
| A D | verbs.c | 761 mr = kzalloc_node(sizeof(*mr), XPRTRDMA_GFP_FLAGS,  in rpcrdma_mrs_create() |
| | | 763 if (!mr)  in rpcrdma_mrs_create() |
| | | 768 kfree(mr);  in rpcrdma_mrs_create() |
| | | 907 struct rpcrdma_mr *mr;  in rpcrdma_req_reset() local |
| | | 929 frwr_mr_release(mr);  in rpcrdma_req_reset() |
| | | 1096 struct rpcrdma_mr *mr;  in rpcrdma_req_destroy() local |
| | | 1107 frwr_mr_release(mr);  in rpcrdma_req_destroy() |
| | | 1126 struct rpcrdma_mr *mr;  in rpcrdma_mrs_destroy() local |
| | | 1138 frwr_mr_release(mr);  in rpcrdma_mrs_destroy() |
| | | 1179 struct rpcrdma_mr *mr;  in rpcrdma_mr_get() local |
| | | [all …] |
|
| A D | xprt_rdma.h | 343 rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)  in rpcrdma_mr_push() argument |
| | | 345 list_add(&mr->mr_list, list);  in rpcrdma_mr_push() |
| | | 351 struct rpcrdma_mr *mr;  in rpcrdma_mr_pop() local |
| | | 353 mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list);  in rpcrdma_mr_pop() |
| | | 354 if (mr)  in rpcrdma_mr_pop() |
| | | 355 list_del_init(&mr->mr_list);  in rpcrdma_mr_pop() |
| | | 356 return mr;  in rpcrdma_mr_pop() |
| | | 536 int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr); |
| | | 537 void frwr_mr_release(struct rpcrdma_mr *mr); |
| | | 541 struct rpcrdma_mr *mr); |
|
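The xprt_rdma.h matches show the whole MR free-list mechanism: rpcrdma_mr_push() is a bare list_add(), and rpcrdma_mr_pop() takes the first entry or NULL and re-initializes its node. A self-contained userspace rendering of the same pattern, with a hand-rolled circular list standing in for <linux/list.h> and a hypothetical demo_mr type in place of struct rpcrdma_mr:

```c
/* Userspace sketch of the rpcrdma MR free-list pattern: push is a
 * list_add() at the head, pop takes the first entry or NULL and
 * re-initializes its node (the list_del_init() idiom). The tiny
 * circular list below stands in for <linux/list.h>.
 */
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_mr {			/* stand-in for struct rpcrdma_mr */
	struct list_head mr_list;
	int mr_id;
};

static void mr_push(struct demo_mr *mr, struct list_head *list)
{
	list_add(&mr->mr_list, list);
}

static struct demo_mr *mr_pop(struct list_head *list)
{
	struct demo_mr *mr;

	if (list->next == list)		/* list_first_entry_or_null() */
		return NULL;
	mr = container_of(list->next, struct demo_mr, mr_list);
	list_del_init(&mr->mr_list);
	return mr;
}
```

The list_del_init() step matters: a popped MR's node points at itself, so a later push or a stray delete cannot corrupt the free list.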
| /net/rds/ |
| A D | rdma.c | 81 return mr;  in rds_mr_tree_walk() |
| | | 102 mr->r_key, kref_read(&mr->r_kref));  in rds_destroy_mr() |
| | | 112 mr->r_trans->free_mr(trans_private, mr->r_invalidate);  in rds_destroy_mr() |
| | | 120 kfree(mr);  in __rds_put_mr_final() |
| | | 238 if (!mr) {  in __rds_rdma_map() |
| | | 354 if (mr)  in __rds_rdma_map() |
| | | 425 if (mr) {  in rds_free_mr() |
| | | 433 if (!mr)  in rds_free_mr() |
| | | 834 if (!mr)  in rds_cmsg_rdma_dest() |
| | | 840 if (mr) {  in rds_cmsg_rdma_dest() |
| | | [all …] |
|
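In rds/rdma.c the MR's lifetime is governed by a kref (r_kref): the final put runs a release callback that hands the transport-private state back through the transport's free_mr() op and then kfree()s the wrapper, as the rds_destroy_mr()/__rds_put_mr_final() matches suggest. A hedged kernel-style sketch of that shape; the struct layout here is a simplified stand-in, not the real struct rds_mr:

```c
/* Kernel-style sketch of the refcounted-MR teardown pattern: the
 * last kref_put() invokes the release callback, which lets the
 * transport free its half before the wrapper is freed. The struct
 * is a simplified stand-in for struct rds_mr.
 */
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_rds_mr {
	struct kref	r_kref;
	u32		r_key;
	int		r_invalidate;
	void		*trans_private;
	void		(*free_mr)(void *trans_private, int invalidate);
};

static void demo_mr_release(struct kref *kref)
{
	struct demo_rds_mr *mr =
		container_of(kref, struct demo_rds_mr, r_kref);

	/* Let the transport tear down its state first... */
	mr->free_mr(mr->trans_private, mr->r_invalidate);
	/* ...then free the wrapper, as __rds_put_mr_final() does. */
	kfree(mr);
}

static void demo_mr_put(struct demo_rds_mr *mr)
{
	kref_put(&mr->r_kref, demo_mr_release);
}
```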
| A D | ib_frmr.c | 78 frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,  in rds_ib_alloc_frmr() |
| | | 80 if (IS_ERR(frmr->mr)) {  in rds_ib_alloc_frmr() |
| | | 82 err = PTR_ERR(frmr->mr);  in rds_ib_alloc_frmr() |
| | | 150 ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);  in rds_ib_post_reg_frmr() |
| | | 157 reg_wr.mr = frmr->mr;  in rds_ib_post_reg_frmr() |
| | | 158 reg_wr.key = frmr->mr->rkey;  in rds_ib_post_reg_frmr() |
| | | 273 if (!i_cm_id || !i_cm_id->qp || !frmr->mr)  in rds_ib_post_inv() |
| | | 290 s_wr->ex.invalidate_rkey = frmr->mr->rkey;  in rds_ib_post_inv() |
| | | 392 if (frmr->mr)  in rds_ib_unreg_frmr() |
| | | 393 ib_dereg_mr(frmr->mr);  in rds_ib_unreg_frmr() |
| | | [all …] |
|
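ib_frmr.c traces the classic FRWR sequence: ib_alloc_mr() once up front, then per registration a fresh key via ib_update_fast_reg_key(), an ib_map_mr_sg() of the pages, and a posted IB_WR_REG_MR work request carrying the MR and its new rkey. A condensed kernel-style sketch of that flow under those assumptions; the function is illustrative rather than the RDS code, and completion handling plus most error paths are elided:

```c
/* Condensed sketch of the FRWR flow: allocate once, then for each
 * registration refresh the key, map the scatterlist, and post an
 * IB_WR_REG_MR WR. Illustrative only; real code signals and reaps
 * the registration completion before using the rkey.
 */
#include <rdma/ib_verbs.h>

static int demo_frwr_register(struct ib_pd *pd, struct ib_qp *qp,
			      struct scatterlist *sg, int nents,
			      u8 key_seed, struct ib_mr **out)
{
	struct ib_reg_wr reg_wr = { };
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Vary the low byte of the rkey so a stale rkey cannot match,
	 * as the remap_count++ at line 150 above does. */
	ib_update_fast_reg_key(mr, key_seed);

	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
	if (n < nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;

	n = ib_post_send(qp, &reg_wr.wr, NULL);
	if (n) {
		ib_dereg_mr(mr);
		return n;
	}
	*out = mr;
	return 0;
}
```

The invalidate side mirrors this: an IB_WR_LOCAL_INV (or remote invalidation) names the rkey in ex.invalidate_rkey, as the rds_ib_post_inv() match shows.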
| A D | ib_mr.h | 53 struct ib_mr *mr;  member |
| | | 84 struct ib_mr *mr;  member |
|
| A D | ib_rdma.c | 543 return ibmr->u.mr->lkey;  in rds_ib_get_lkey() |
| | | 594 ibmr->u.mr = ib_mr;  in rds_ib_get_mr() |
| | | 695 ib_dereg_mr(ibmr->u.mr);  in rds_ib_odp_mr_worker() |
|
| /net/xdp/ |
| A D | xdp_umem.c | 157 static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)  in xdp_umem_reg() argument |
| | | 159 bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;  in xdp_umem_reg() |
| | | 160 u32 chunk_size = mr->chunk_size, headroom = mr->headroom;  in xdp_umem_reg() |
| | | 161 u64 addr = mr->addr, size = mr->len;  in xdp_umem_reg() |
| | | 176 if (mr->flags & ~XDP_UMEM_FLAGS_VALID)  in xdp_umem_reg() |
| | | 208 if (mr->flags & XDP_UMEM_TX_METADATA_LEN) {  in xdp_umem_reg() |
| | | 209 if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8)  in xdp_umem_reg() |
| | | 211 umem->tx_metadata_len = mr->tx_metadata_len;  in xdp_umem_reg() |
| | | 221 umem->flags = mr->flags;  in xdp_umem_reg() |
| | | 247 struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)  in xdp_umem_create() argument |
| | | [all …] |
|
| A D | xdp_umem.h | 13 struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr); |
|
| A D | xsk.c | 1381 struct xdp_umem_reg mr = {};  in xsk_setsockopt() local |
| | | 1386 else if (optlen < sizeof(mr))  in xsk_setsockopt() |
| | | 1400 if (copy_from_sockptr(&mr, optval, mr_size))  in xsk_setsockopt() |
| | | 1409 umem = xdp_umem_create(&mr);  in xsk_setsockopt() |
|
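Reading xsk.c together with xdp_umem.c gives the full registration path: userspace fills a struct xdp_umem_reg and passes it via the XDP_UMEM_REG socket option; xsk_setsockopt() copies it in and xdp_umem_reg() validates it (note lines 208 and 209 above: with XDP_UMEM_TX_METADATA_LEN set, tx_metadata_len must be below 256 and a multiple of 8). A minimal userspace sketch of the registering side, assuming a kernel built with AF_XDP support; frame count and size are arbitrary demo values:

```c
/* Minimal sketch of the userspace side of umem registration:
 * allocate a page-aligned area and hand it to the kernel with
 * the XDP_UMEM_REG socket option. No rings or binding, just the
 * registration step the xsk.c matches above receive.
 */
#include <linux/if_xdp.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_XDP
#define AF_XDP 44		/* missing from very old libc headers */
#endif
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#define NUM_FRAMES	4096	/* arbitrary demo values */
#define FRAME_SIZE	2048	/* becomes mr.chunk_size */

int main(void)
{
	struct xdp_umem_reg mr = { 0 };
	void *area;
	int fd;

	fd = socket(AF_XDP, SOCK_RAW, 0);
	if (fd < 0)
		return 1;

	/* The umem area must be page-aligned. */
	if (posix_memalign(&area, (size_t)getpagesize(),
			   (size_t)NUM_FRAMES * FRAME_SIZE))
		return 1;

	mr.addr = (unsigned long long)(uintptr_t)area;
	mr.len = (unsigned long long)NUM_FRAMES * FRAME_SIZE;
	mr.chunk_size = FRAME_SIZE;
	mr.headroom = 0;
	mr.flags = 0;	/* e.g. XDP_UMEM_UNALIGNED_CHUNK_FLAG goes here */

	/* Kernel side: xsk_setsockopt() -> xdp_umem_create() ->
	 * xdp_umem_reg(), which enforces the flag and size rules
	 * visible in the xdp_umem.c matches above. */
	if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)))
		return 1;

	close(fd);
	return 0;
}
```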
| /net/netfilter/ |
| A D | xt_MASQUERADE.c | 21 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;  in masquerade_tg_check() local |
| | | 23 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {  in masquerade_tg_check() |
| | | 27 if (mr->rangesize != 1) {  in masquerade_tg_check() |
| | | 28 pr_debug("bad rangesize %u\n", mr->rangesize);  in masquerade_tg_check() |
| | | 38 const struct nf_nat_ipv4_multi_range_compat *mr;  in masquerade_tg() local |
| | | 40 mr = par->targinfo;  in masquerade_tg() |
| | | 41 range.flags = mr->range[0].flags;  in masquerade_tg() |
| | | 42 range.min_proto = mr->range[0].min;  in masquerade_tg() |
| | | 43 range.max_proto = mr->range[0].max;  in masquerade_tg() |
|
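masquerade_tg_check() shows the validation the legacy compat targets repeat (compare the xt_REDIRECT.c and xt_nat.c entries below): exactly one range, and for MASQUERADE and REDIRECT no NF_NAT_RANGE_MAP_IPS, since those targets compute the address themselves. A toy userspace sketch of that check against the real uapi struct; the helper name is illustrative, not a kernel function:

```c
/* Toy validation mirroring masquerade_tg_check() and
 * redirect_tg4_check(): one range only, and targets that pick the
 * address themselves reject NF_NAT_RANGE_MAP_IPS.
 */
#include <errno.h>
#include <stdio.h>
#include <linux/netfilter/nf_nat.h>

static int check_compat_range(const struct nf_nat_ipv4_multi_range_compat *mr,
			      int allow_map_ips)
{
	if (!allow_map_ips && (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
		fprintf(stderr, "unsupported mapping of addresses\n");
		return -EINVAL;
	}
	if (mr->rangesize != 1) {
		fprintf(stderr, "bad rangesize %u\n", mr->rangesize);
		return -EINVAL;
	}
	return 0;
}
```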
| A D | xt_NETMAP.c | 74 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;  in netmap_tg4() local |
| | | 83 netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);  in netmap_tg4() |
| | | 90 new_ip |= mr->range[0].min_ip & netmask;  in netmap_tg4() |
| | | 94 newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;  in netmap_tg4() |
| | | 97 newrange.min_proto = mr->range[0].min;  in netmap_tg4() |
| | | 98 newrange.max_proto = mr->range[0].max;  in netmap_tg4() |
| | | 106 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;  in netmap_tg4_check() local |
| | | 108 if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {  in netmap_tg4_check() |
| | | 112 if (mr->rangesize != 1) {  in netmap_tg4_check() |
| | | 113 pr_debug("bad rangesize %u.\n", mr->rangesize);  in netmap_tg4_check() |
|
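netmap_tg4() derives its mask from the single range at line 83: bits that differ between min_ip and max_ip are host bits, everything else is network. Line 90 then splices the range's network bits onto the packet's host bits, giving the 1:1 subnet mapping NETMAP exists for. A small runnable demonstration of that arithmetic, with made-up addresses:

```c
/* Runnable illustration of the NETMAP arithmetic at lines 83 and 90
 * above: keep the host bits of the packet's address and replace the
 * network bits with the target range's. Addresses are made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t min_ip = ntohl(inet_addr("10.5.0.0"));		/* range start */
	uint32_t max_ip = ntohl(inet_addr("10.5.0.255"));	/* range end   */
	uint32_t orig   = ntohl(inet_addr("192.168.1.42"));	/* packet addr */

	/* Bits that differ across the range are host bits; the rest
	 * form the netmask, exactly as netmap_tg4() computes it. */
	uint32_t netmask = ~(min_ip ^ max_ip);

	uint32_t new_ip = (orig & ~netmask) | (min_ip & netmask);

	struct in_addr a = { .s_addr = htonl(new_ip) };
	printf("mapped to %s\n", inet_ntoa(a));	/* prints 10.5.0.42 */
	return 0;
}
```

Because the mapping preserves the host bits, it is its own inverse over the paired subnets, which is what lets NETMAP rewrite both directions of a flow consistently.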
| A D | xt_REDIRECT.c | 51 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;  in redirect_tg4_check() local |
| | | 53 if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {  in redirect_tg4_check() |
| | | 57 if (mr->rangesize != 1) {  in redirect_tg4_check() |
| | | 58 pr_debug("bad rangesize %u.\n", mr->rangesize);  in redirect_tg4_check() |
| | | 67 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;  in redirect_tg4() local |
| | | 69 .flags = mr->range[0].flags,  in redirect_tg4() |
| | | 70 .min_proto = mr->range[0].min,  in redirect_tg4() |
| | | 71 .max_proto = mr->range[0].max,  in redirect_tg4() |
|
| A D | xt_nat.c | 18 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;  in xt_nat_checkentry_v0() local |
| | | 20 if (mr->rangesize != 1) {  in xt_nat_checkentry_v0() |
| | | 54 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;  in xt_snat_target_v0() local |
| | | 64 xt_nat_convert_range(&range, &mr->range[0]);  in xt_snat_target_v0() |
| | | 71 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;  in xt_dnat_target_v0() local |
| | | 80 xt_nat_convert_range(&range, &mr->range[0]);  in xt_dnat_target_v0() |
|
| /net/smc/ |
| A D | smc_ib.c | 695 void smc_ib_put_memory_region(struct ib_mr *mr)  in smc_ib_put_memory_region() argument |
| | | 697 ib_dereg_mr(mr);  in smc_ib_put_memory_region() |
| | | 706 sg_num = ib_map_mr_sg(buf_slot->mr[link_idx],  in smc_ib_map_mr_sg() |
| | | 718 if (buf_slot->mr[link_idx])  in smc_ib_get_memory_region() |
| | | 721 buf_slot->mr[link_idx] =  in smc_ib_get_memory_region() |
| | | 723 if (IS_ERR(buf_slot->mr[link_idx])) {  in smc_ib_get_memory_region() |
| | | 726 rc = PTR_ERR(buf_slot->mr[link_idx]);  in smc_ib_get_memory_region() |
| | | 727 buf_slot->mr[link_idx] = NULL;  in smc_ib_get_memory_region() |
|
| A D | smc_wr.c | 366 int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)  in smc_wr_reg_send() argument |
| | | 373 link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;  in smc_wr_reg_send() |
| | | 374 link->wr_reg.mr = mr;  in smc_wr_reg_send() |
| | | 375 link->wr_reg.key = mr->rkey;  in smc_wr_reg_send() |
|
| A D | smc_wr.h | 137 int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr); |
|
| A D | smc_ib.h | 103 void smc_ib_put_memory_region(struct ib_mr *mr); |
|
| A D | smc_llc.c | 512 htonl(rmb_desc->mr[link->link_idx]->rkey);  in smc_llc_send_confirm_rkey() |
| | | 523 htonl(rmb_desc->mr[send_link->link_idx]->rkey);  in smc_llc_send_confirm_rkey() |
| | | 554 rkeyllc->rkey[0] = htonl(rmb_desc->mr[link->link_idx]->rkey);  in smc_llc_send_delete_rkey() |
| | | 629 ext->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);  in smc_llc_fill_ext_v2() |
| | | 630 ext->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);  in smc_llc_fill_ext_v2() |
| | | 868 addc_llc->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);  in smc_llc_add_link_cont() |
| | | 869 addc_llc->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);  in smc_llc_add_link_cont() |
|
| A D | smc_core.c | 1290 buf_desc->mr[lnk->link_idx]) {  in smcr_buf_unmap_link() |
| | | 1291 smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]);  in smcr_buf_unmap_link() |
| | | 1292 buf_desc->mr[lnk->link_idx] = NULL;  in smcr_buf_unmap_link() |
| | | 2213 buf_desc->mr[link->link_idx]->iova =  in smcr_link_reg_buf() |
| | | 2215 if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) {  in smcr_link_reg_buf() |
|
| A D | smc_core.h | 203 struct ib_mr *mr[SMC_LINKS_PER_LGR_MAX];  member |
|
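smc_core.h line 203 is the key structure here: every RMB descriptor carries one ib_mr per link of the link group, so a buffer can be registered (smcr_link_reg_buf()), have its rkey advertised in network byte order (the htonl() sends in smc_llc.c), and be torn down on each link independently. A kernel-style sketch of the teardown step smcr_buf_unmap_link() performs, with simplified stand-in types:

```c
/* Sketch of the per-link MR slot teardown visible in
 * smcr_buf_unmap_link(): deregister this link's MR for the buffer
 * and clear the slot so a later remap can register it again.
 * Types are simplified stand-ins for the SMC structures.
 */
#include <rdma/ib_verbs.h>

#define DEMO_LINKS_PER_LGR_MAX 3	/* SMC_LINKS_PER_LGR_MAX in smc_core.h */

struct demo_buf_desc {
	struct ib_mr *mr[DEMO_LINKS_PER_LGR_MAX];	/* one MR per link */
};

static void demo_buf_unmap_link(struct demo_buf_desc *buf_desc, int link_idx)
{
	if (buf_desc->mr[link_idx]) {
		/* smc_ib_put_memory_region() is just ib_dereg_mr(). */
		ib_dereg_mr(buf_desc->mr[link_idx]);
		buf_desc->mr[link_idx] = NULL;
	}
}
```

Clearing the slot is what lets smc_ib_get_memory_region() treat a non-NULL entry as "already registered" and return early, as the smc_ib.c match at line 718 shows.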
| A D | smc_tx.c | 391 conn->sndbuf_desc->mr[link->link_idx]->lkey;  in smcr_tx_rdma_writes() |
|
| A D | smc_clc.c | 1112 htonl(conn->rmb_desc->mr[link->link_idx]->rkey);  in smcr_clc_prep_confirm_accept() |
|