Lines matching refs:ibmr (net/rds/ib_frmr.c)

36 rds_transition_frwr_state(struct rds_ib_mr *ibmr,  in rds_transition_frwr_state()  argument
40 if (cmpxchg(&ibmr->u.frmr.fr_state, in rds_transition_frwr_state()
47 atomic_dec(&ibmr->ic->i_fastreg_inuse_count); in rds_transition_frwr_state()
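
The hits at lines 36-47 are the FRWR state-machine helper: a lock-free cmpxchg() on fr_state, with the connection's i_fastreg_inuse_count dropped when the transition succeeds. A minimal sketch of that pattern; the guard limiting the decrement to transitions out of FRMR_IS_INUSE is an assumption, not a matched line:

static void rds_transition_frwr_state(struct rds_ib_mr *ibmr,
                                      enum rds_ib_fr_state old_state,
                                      enum rds_ib_fr_state new_state)
{
        /* cmpxchg() returns the prior value; equality means this thread
         * won the race and owns the side effects of the transition. */
        if (cmpxchg(&ibmr->u.frmr.fr_state,
                    old_state, new_state) == old_state &&
            old_state == FRMR_IS_INUSE) {
                /* One fewer fast-reg MR in flight on this connection. */
                atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
        }
}
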
57 struct rds_ib_mr *ibmr = NULL; in rds_ib_alloc_frmr() local
66 ibmr = rds_ib_try_reuse_ibmr(pool); in rds_ib_alloc_frmr()
67 if (ibmr) in rds_ib_alloc_frmr()
68 return ibmr; in rds_ib_alloc_frmr()
70 ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, in rds_ib_alloc_frmr()
72 if (!ibmr) { in rds_ib_alloc_frmr()
77 frmr = &ibmr->u.frmr; in rds_ib_alloc_frmr()
86 ibmr->pool = pool; in rds_ib_alloc_frmr()
98 return ibmr; in rds_ib_alloc_frmr()
101 kfree(ibmr); in rds_ib_alloc_frmr()
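
Lines 57-101 are the allocator: reuse an MR from the per-size pool when possible, otherwise kzalloc_node() near the device's NUMA node, then pool bookkeeping, with kfree() on the error path. A condensed reconstruction; the 8K/1M pool split, the ib_alloc_mr() call, and the error values are assumptions filled in around the matched lines:

static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
                                           int npages)
{
        struct rds_ib_mr_pool *pool;
        struct rds_ib_mr *ibmr;
        struct rds_ib_frmr *frmr;

        /* Assumed 8K/1M split, matching the pool types seen below. */
        pool = (npages <= RDS_MR_8K_MSG_SIZE) ? rds_ibdev->mr_8k_pool
                                              : rds_ibdev->mr_1m_pool;

        /* Fast path: recycle an already-registered MR from the pool. */
        ibmr = rds_ib_try_reuse_ibmr(pool);
        if (ibmr)
                return ibmr;

        /* Slow path: allocate near the device's NUMA node. */
        ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
                            rdsibdev_to_node(rds_ibdev));
        if (!ibmr)
                return ERR_PTR(-ENOMEM);

        frmr = &ibmr->u.frmr;
        frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
                               pool->max_pages);
        if (IS_ERR(frmr->mr)) {
                struct rds_ib_mr *err = ERR_CAST(frmr->mr);

                kfree(ibmr);
                return err;
        }

        ibmr->pool = pool;
        return ibmr;
}
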
106 static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop) in rds_ib_free_frmr() argument
108 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_free_frmr()
111 llist_add(&ibmr->llnode, &pool->drop_list); in rds_ib_free_frmr()
113 llist_add(&ibmr->llnode, &pool->free_list); in rds_ib_free_frmr()
114 atomic_add(ibmr->sg_len, &pool->free_pinned); in rds_ib_free_frmr()
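
The free path at lines 106-114 is visible almost in full. It is lock-free by construction: llist_add() is safe against concurrent producers, so MRs can be returned from any context, and the pinned-page total simply moves into the pool's free_pinned counter until a flush runs:

static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
{
        struct rds_ib_mr_pool *pool = ibmr->pool;

        if (drop)       /* must be invalidated/re-registered before reuse */
                llist_add(&ibmr->llnode, &pool->drop_list);
        else            /* clean: may be handed out again as-is */
                llist_add(&ibmr->llnode, &pool->free_list);

        /* Pages stay pinned until the pool is flushed; account for them. */
        atomic_add(ibmr->sg_len, &pool->free_pinned);
}
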
123 static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) in rds_ib_post_reg_frmr() argument
125 struct rds_ib_frmr *frmr = &ibmr->u.frmr; in rds_ib_post_reg_frmr()
129 while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { in rds_ib_post_reg_frmr()
130 atomic_inc(&ibmr->ic->i_fastreg_wrs); in rds_ib_post_reg_frmr()
134 ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len, in rds_ib_post_reg_frmr()
136 if (unlikely(ret != ibmr->sg_dma_len)) in rds_ib_post_reg_frmr()
143 atomic_inc(&ibmr->ic->i_fastreg_inuse_count); in rds_ib_post_reg_frmr()
150 ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++); in rds_ib_post_reg_frmr()
154 reg_wr.wr.wr_id = (unsigned long)(void *)ibmr; in rds_ib_post_reg_frmr()
164 ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL); in rds_ib_post_reg_frmr()
167 rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE); in rds_ib_post_reg_frmr()
169 atomic_inc(&ibmr->ic->i_fastreg_wrs); in rds_ib_post_reg_frmr()
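
rds_ib_post_reg_frmr() (lines 123-169) posts the fast registration. The hits show the moving parts: a work-request credit taken from i_fastreg_wrs and returned on failure, ib_map_mr_sg_zbva() loading the scatterlist into the MR, a rolling remap_count fed to ib_update_fast_reg_key() so stale rkeys cannot match a reissued registration, the ibmr pointer stashed in wr_id for the completion handler, and a transition to FRMR_IS_STALE when ib_post_send() fails. A condensed sketch; the access flags, page size, back-off, and the explicit FRMR_IS_INUSE store are assumptions:

static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;
        struct ib_reg_wr reg_wr;
        unsigned int off = 0;
        int ret;

        /* Take one WR credit; spin politely until one is available. */
        while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
                atomic_inc(&ibmr->ic->i_fastreg_wrs);
                cpu_relax();
        }

        ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
                                &off, PAGE_SIZE);
        if (unlikely(ret != ibmr->sg_dma_len)) {
                atomic_inc(&ibmr->ic->i_fastreg_wrs); /* return the credit */
                return ret < 0 ? ret : -EINVAL;
        }

        /* The MR is in flight from here until its CQE arrives. */
        atomic_inc(&ibmr->ic->i_fastreg_inuse_count);

        /* Rolling key so stale remote references can no longer match. */
        ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
        frmr->fr_state = FRMR_IS_INUSE;

        memset(&reg_wr, 0, sizeof(reg_wr));
        reg_wr.wr.wr_id = (unsigned long)(void *)ibmr; /* CQE handler key */
        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.wr.send_flags = IB_SEND_SIGNALED;
        reg_wr.mr = frmr->mr;
        reg_wr.key = frmr->mr->rkey;
        reg_wr.access = IB_ACCESS_LOCAL_WRITE |
                        IB_ACCESS_REMOTE_READ |
                        IB_ACCESS_REMOTE_WRITE;

        ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
        if (unlikely(ret)) {
                /* Failed post: mark the MR stale, give the credit back. */
                rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
                atomic_inc(&ibmr->ic->i_fastreg_wrs);
        }
        return ret;
}

Stashing the ibmr pointer in wr_id is the design choice that lets a single CQE handler serve both registration and invalidation completions; see rds_ib_mr_cqe_handler() below.
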
189 struct rds_ib_mr *ibmr, in rds_ib_map_frmr() argument
193 struct rds_ib_frmr *frmr = &ibmr->u.frmr; in rds_ib_map_frmr()
201 rds_ib_teardown_mr(ibmr); in rds_ib_map_frmr()
203 ibmr->sg = sg; in rds_ib_map_frmr()
204 ibmr->sg_len = sg_len; in rds_ib_map_frmr()
205 ibmr->sg_dma_len = 0; in rds_ib_map_frmr()
207 WARN_ON(ibmr->sg_dma_len); in rds_ib_map_frmr()
208 ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len, in rds_ib_map_frmr()
210 if (unlikely(!ibmr->sg_dma_len)) { in rds_ib_map_frmr()
220 for (i = 0; i < ibmr->sg_dma_len; ++i) { in rds_ib_map_frmr()
221 unsigned int dma_len = sg_dma_len(&ibmr->sg[i]); in rds_ib_map_frmr()
222 u64 dma_addr = sg_dma_address(&ibmr->sg[i]); in rds_ib_map_frmr()
233 if (i < ibmr->sg_dma_len - 1) in rds_ib_map_frmr()
243 if (frmr->dma_npages > ibmr->pool->max_pages) { in rds_ib_map_frmr()
248 ret = rds_ib_post_reg_frmr(ibmr); in rds_ib_map_frmr()
252 if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_map_frmr()
260 ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len, in rds_ib_map_frmr()
262 ibmr->sg_dma_len = 0; in rds_ib_map_frmr()
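
rds_ib_map_frmr() (lines 189-262) DMA-maps the scatterlist and validates it for fast registration: only the first entry may start mid-page and only the last may end mid-page (the i > 0 and i < sg_dma_len - 1 tests above), the resulting page count must fit the pool's max_pages, and every failure unwinds through ib_dma_unmap_sg(). A condensed sketch; the byte/page accounting fields and the per-pool statistics names are assumptions paraphrased from the visible iteration:

static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
                           struct rds_ib_mr_pool *pool,
                           struct rds_ib_mr *ibmr,
                           struct scatterlist *sg, unsigned int sg_len)
{
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;
        unsigned int len = 0;
        int i;
        int ret = -EINVAL;

        rds_ib_teardown_mr(ibmr);       /* drop any previous mapping */

        ibmr->sg = sg;
        ibmr->sg_len = sg_len;
        WARN_ON(ibmr->sg_dma_len);      /* teardown must have cleared it */
        ibmr->sg_dma_len = ib_dma_map_sg(rds_ibdev->dev, ibmr->sg,
                                         ibmr->sg_len, DMA_BIDIRECTIONAL);
        if (unlikely(!ibmr->sg_dma_len))
                return -EBUSY;

        frmr->sg_byte_len = 0;
        frmr->dma_npages = 0;
        for (i = 0; i < ibmr->sg_dma_len; ++i) {
                unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
                u64 dma_addr = sg_dma_address(&ibmr->sg[i]);

                frmr->sg_byte_len += dma_len;
                /* Only the first entry may start mid-page... */
                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                goto out_unmap;
                        ++frmr->dma_npages;     /* leading partial page */
                }
                /* ...and only the last entry may end mid-page. */
                if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < ibmr->sg_dma_len - 1)
                                goto out_unmap;
                        ++frmr->dma_npages;     /* trailing partial page */
                }
                len += dma_len;
        }
        frmr->dma_npages += len >> PAGE_SHIFT;

        if (frmr->dma_npages > ibmr->pool->max_pages)
                goto out_unmap;

        ret = rds_ib_post_reg_frmr(ibmr);
        if (ret)
                goto out_unmap;

        if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
                rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
        else
                rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
        return 0;

out_unmap:
        ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
                        DMA_BIDIRECTIONAL);
        ibmr->sg_dma_len = 0;
        return ret;
}
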
266 static int rds_ib_post_inv(struct rds_ib_mr *ibmr) in rds_ib_post_inv() argument
269 struct rds_ib_frmr *frmr = &ibmr->u.frmr; in rds_ib_post_inv()
270 struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id; in rds_ib_post_inv()
279 while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { in rds_ib_post_inv()
280 atomic_inc(&ibmr->ic->i_fastreg_wrs); in rds_ib_post_inv()
288 s_wr->wr_id = (unsigned long)(void *)ibmr; in rds_ib_post_inv()
295 rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE); in rds_ib_post_inv()
301 atomic_inc(&ibmr->ic->i_fastreg_wrs); in rds_ib_post_inv()
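
rds_ib_post_inv() (lines 266-301) mirrors the registration post for local invalidation: the same i_fastreg_wrs credit scheme, the same ibmr-in-wr_id convention, the same stale-on-failure transition. A sketch; fr_wr and fr_inv are assumed fields of struct rds_ib_frmr:

static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
{
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;
        struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
        struct ib_send_wr *s_wr = &frmr->fr_wr;
        int ret;

        /* Same credit scheme as the registration post. */
        while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
                atomic_inc(&ibmr->ic->i_fastreg_wrs);
                cpu_relax();
        }

        frmr->fr_inv = true;    /* tells the CQE handler what completed */
        memset(s_wr, 0, sizeof(*s_wr));
        s_wr->wr_id = (unsigned long)(void *)ibmr;
        s_wr->opcode = IB_WR_LOCAL_INV;
        s_wr->ex.invalidate_rkey = frmr->mr->rkey;
        s_wr->send_flags = IB_SEND_SIGNALED;

        ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
        if (unlikely(ret)) {
                rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
                frmr->fr_inv = false;
                atomic_inc(&ibmr->ic->i_fastreg_wrs);
        }
        return ret;
}
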
324 struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id; in rds_ib_mr_cqe_handler() local
325 struct rds_ib_frmr *frmr = &ibmr->u.frmr; in rds_ib_mr_cqe_handler()
328 rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE); in rds_ib_mr_cqe_handler()
340 rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE); in rds_ib_mr_cqe_handler()
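
The completion handler (lines 324-340) recovers the ibmr from wc->wr_id and drives the state machine from the CQE: any error pushes the MR to FRMR_IS_STALE, a successful invalidation returns it to FRMR_IS_FREE. A sketch; the fr_inv test and the credit return are assumptions inferred from the posts above:

void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
        struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;

        if (wc->status != IB_WC_SUCCESS) {
                /* A failed fast-reg or invalidate leaves the MR unusable. */
                rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
        } else if (frmr->fr_inv) {
                /* A completed LOCAL_INV puts the MR back in the free state. */
                frmr->fr_inv = false;
                rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
        }

        /* Return the work-request credit taken at post time. */
        atomic_inc(&ic->i_fastreg_wrs);
}
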
360 struct rds_ib_mr *ibmr, *next; in rds_ib_unreg_frmr() local
366 list_for_each_entry(ibmr, list, unmap_list) { in rds_ib_unreg_frmr()
367 if (ibmr->sg_dma_len) { in rds_ib_unreg_frmr()
368 ret2 = rds_ib_post_inv(ibmr); in rds_ib_unreg_frmr()
378 list_for_each_entry_safe(ibmr, next, list, unmap_list) { in rds_ib_unreg_frmr()
379 *unpinned += ibmr->sg_len; in rds_ib_unreg_frmr()
380 frmr = &ibmr->u.frmr; in rds_ib_unreg_frmr()
381 __rds_ib_teardown_mr(ibmr); in rds_ib_unreg_frmr()
387 if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_unreg_frmr()
391 list_del(&ibmr->unmap_list); in rds_ib_unreg_frmr()
394 kfree(ibmr); in rds_ib_unreg_frmr()
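
rds_ib_unreg_frmr() (lines 360-394) is two passes over the flush list: first post a LOCAL_INV for every MR that still holds a DMA mapping, then walk the list with the _safe iterator to tear each MR down, account the unpinned pages, bump the per-pool free statistic, unlink, and kfree(). A simplified sketch; the wait for the invalidate completions between the passes and the skip of MRs still in use are elided, and the goal test is an assumption:

static void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
                              unsigned long *unpinned, unsigned int goal)
{
        struct rds_ib_mr *ibmr, *next;
        struct rds_ib_frmr *frmr;
        int ret = 0, ret2;

        /* Pass 1: fire a LOCAL_INV for everything still DMA-mapped. */
        list_for_each_entry(ibmr, list, unmap_list) {
                if (ibmr->sg_dma_len) {
                        ret2 = rds_ib_post_inv(ibmr);
                        if (ret2 && !ret)
                                ret = ret2;
                }
        }
        if (ret)
                pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);

        /* (the real driver waits for the invalidations to complete here) */

        /* Pass 2: tear down, account, and free. */
        list_for_each_entry_safe(ibmr, next, list, unmap_list) {
                *unpinned += ibmr->sg_len;
                frmr = &ibmr->u.frmr;
                __rds_ib_teardown_mr(ibmr);
                /* Free up to the caller's goal, plus anything stale. */
                if (*nfreed < goal || frmr->fr_state == FRMR_IS_STALE) {
                        if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
                                rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
                        else
                                rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
                        list_del(&ibmr->unmap_list);
                        if (frmr->mr)
                                ib_dereg_mr(frmr->mr); /* release the HCA MR */
                        kfree(ibmr);
                        (*nfreed)++;
                }
        }
}
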
406 struct rds_ib_mr *ibmr = NULL; in rds_ib_reg_frmr() local
416 if (ibmr) in rds_ib_reg_frmr()
417 rds_ib_free_frmr(ibmr, true); in rds_ib_reg_frmr()
418 ibmr = rds_ib_alloc_frmr(rds_ibdev, nents); in rds_ib_reg_frmr()
419 if (IS_ERR(ibmr)) in rds_ib_reg_frmr()
420 return ibmr; in rds_ib_reg_frmr()
421 frmr = &ibmr->u.frmr; in rds_ib_reg_frmr()
424 ibmr->ic = ic; in rds_ib_reg_frmr()
425 ibmr->device = rds_ibdev; in rds_ib_reg_frmr()
426 ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents); in rds_ib_reg_frmr()
430 rds_ib_free_frmr(ibmr, false); in rds_ib_reg_frmr()
431 ibmr = ERR_PTR(ret); in rds_ib_reg_frmr()
434 return ibmr; in rds_ib_reg_frmr()
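
rds_ib_reg_frmr() (lines 406-434) is the registration entry point: loop until the allocator hands back a usable MR (dropping any stale one it drew via the drop_list), bind it to the connection and device, map the caller's scatterlist, and turn a mapping failure into ERR_PTR() after putting the MR back on the free list. A sketch with a condensed signature; the loop condition is an assumption consistent with the free/alloc pairing above:

struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
                                  struct rds_ib_connection *ic,
                                  struct scatterlist *sg, unsigned long nents)
{
        struct rds_ib_mr *ibmr = NULL;
        struct rds_ib_frmr *frmr;
        int ret;

        do {
                /* A reused MR may come back stale; drop it and retry. */
                if (ibmr)
                        rds_ib_free_frmr(ibmr, true);
                ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
                if (IS_ERR(ibmr))
                        return ibmr;
                frmr = &ibmr->u.frmr;
        } while (frmr->fr_state != FRMR_IS_FREE);

        ibmr->ic = ic;
        ibmr->device = rds_ibdev;
        ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
        if (ret) {
                /* The mapping never took; the MR itself is still clean. */
                rds_ib_free_frmr(ibmr, false);
                ibmr = ERR_PTR(ret);
        }
        return ibmr;
}
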
437 void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr) in rds_ib_free_frmr_list() argument
439 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_free_frmr_list()
440 struct rds_ib_frmr *frmr = &ibmr->u.frmr; in rds_ib_free_frmr_list()
443 llist_add(&ibmr->llnode, &pool->drop_list); in rds_ib_free_frmr_list()
445 llist_add(&ibmr->llnode, &pool->free_list); in rds_ib_free_frmr_list()
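
Finally, rds_ib_free_frmr_list() (lines 437-445) makes the same drop/free split as rds_ib_free_frmr(), but decides from the MR's own state rather than a caller-supplied flag. A sketch; the FRMR_IS_STALE test is an assumption inferred from that split:

void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
{
        struct rds_ib_mr_pool *pool = ibmr->pool;
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;

        if (frmr->fr_state == FRMR_IS_STALE)    /* must be re-registered */
                llist_add(&ibmr->llnode, &pool->drop_list);
        else                                    /* reusable as-is */
                llist_add(&ibmr->llnode, &pool->free_list);
}
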