
Searched refs:rdma (Results 1 – 13 of 13) sorted by relevance

/net/9p/
trans_rdma.c
260 if (rdma) in p9_cm_event_handler()
364 if (!rdma) in rdma_destroy_trans()
367 if (rdma->qp && !IS_ERR(rdma->qp)) in rdma_destroy_trans()
370 if (rdma->pd && !IS_ERR(rdma->pd)) in rdma_destroy_trans()
373 if (rdma->cq && !IS_ERR(rdma->cq)) in rdma_destroy_trans()
376 if (rdma->cm_id && !IS_ERR(rdma->cm_id)) in rdma_destroy_trans()
559 if (!rdma) in rdma_close()
586 sema_init(&rdma->sq_sem, rdma->sq_depth); in alloc_rdma()
587 sema_init(&rdma->rq_sem, rdma->rq_depth); in alloc_rdma()
704 rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0); in rdma_create_trans()
[all …]
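
The alloc_rdma() hits at 586–587 initialise two counting semaphores to the send- and receive-queue depths, so senders block instead of posting more work requests than the queues can hold. Below is a minimal userspace sketch of that credit scheme using POSIX semaphores; the names (SQ_DEPTH, post_send, send_completed) are invented for illustration, not the 9p code itself.

    #include <semaphore.h>
    #include <stdio.h>

    #define SQ_DEPTH 32          /* hypothetical send-queue depth */

    static sem_t sq_sem;         /* one credit per free send-queue slot */

    /* Block until a send-queue slot is free, then "post" a request. */
    static void post_send(const char *msg)
    {
        sem_wait(&sq_sem);       /* consume a credit before posting */
        printf("posted: %s\n", msg);
    }

    /* A completion returns the credit, letting the next sender proceed. */
    static void send_completed(void)
    {
        sem_post(&sq_sem);
    }

    int main(void)
    {
        sem_init(&sq_sem, 0, SQ_DEPTH);   /* analogue of sema_init(&rdma->sq_sem, sq_depth) */
        post_send("hello");
        send_completed();
        sem_destroy(&sq_sem);
        return 0;
    }
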
/net/sunrpc/xprtrdma/
svc_rdma_sendto.c
146 ctxt->sc_rdma = rdma; in svc_rdma_send_ctxt_alloc()
154 rdma->sc_max_req_size); in svc_rdma_send_ctxt_alloc()
185 rdma->sc_max_req_size, in svc_rdma_send_ctxts_destroy()
205 spin_lock(&rdma->sc_send_lock); in svc_rdma_send_ctxt_get()
207 spin_unlock(&rdma->sc_send_lock); in svc_rdma_send_ctxt_get()
294 wake_up(&rdma->sc_send_wait); in svc_rdma_wake_send_waiters()
378 wait_event(rdma->sc_send_wait, in svc_rdma_post_send()
835 .md_rdma = rdma, in svc_rdma_map_reply_msg()
952 *p++ = rdma->sc_fc_credits; in svc_rdma_send_error_msg()
1002 struct svcxprt_rdma *rdma = in svc_rdma_sendto() local
[all …]
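
These hits pair a spinlock-protected send-context pool with a wait queue: svc_rdma_post_send() sleeps on sc_send_wait and svc_rdma_wake_send_waiters() wakes waiters when credits come back. A rough userspace analogue using a mutex and condition variable follows; every name in it (ctxt_get, ctxt_put, send_lock) is made up for the sketch.

    #include <pthread.h>

    struct send_ctxt { struct send_ctxt *next; };

    static pthread_mutex_t send_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  send_wait = PTHREAD_COND_INITIALIZER;
    static struct send_ctxt *free_list;

    /* Take a context from the free list, sleeping until one is available. */
    static struct send_ctxt *ctxt_get(void)
    {
        struct send_ctxt *c;

        pthread_mutex_lock(&send_lock);
        while (!free_list)
            pthread_cond_wait(&send_wait, &send_lock);
        c = free_list;
        free_list = c->next;
        pthread_mutex_unlock(&send_lock);
        return c;
    }

    /* Return a context to the pool and wake one waiter. */
    static void ctxt_put(struct send_ctxt *c)
    {
        pthread_mutex_lock(&send_lock);
        c->next = free_list;
        free_list = c;
        pthread_cond_signal(&send_wait);
        pthread_mutex_unlock(&send_lock);
    }

    int main(void)
    {
        struct send_ctxt pool[4];

        for (int i = 0; i < 4; i++)
            ctxt_put(&pool[i]);          /* seed the free list */

        struct send_ctxt *c = ctxt_get();
        ctxt_put(c);
        return 0;
    }
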
svc_rdma_recvfrom.c
248 struct svcxprt_rdma *rdma = in svc_rdma_release_ctxt() local
275 rdma->sc_pending_recvs++; in svc_rdma_refresh_recvs()
313 total = (rdma->sc_max_requests * 2) + rdma->sc_recv_batch; in svc_rdma_post_recvs()
323 return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests); in svc_rdma_post_recvs()
338 rdma->sc_pending_recvs--; in svc_rdma_wc_receive()
356 if (rdma->sc_pending_recvs < rdma->sc_max_requests) in svc_rdma_wc_receive()
357 if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch)) in svc_rdma_wc_receive()
363 spin_lock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_receive()
367 spin_unlock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_receive()
369 svc_xprt_enqueue(&rdma->sc_xprt); in svc_rdma_wc_receive()
[all …]
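
The receive path keeps a running count of posted buffers: svc_rdma_refresh_recvs() bumps sc_pending_recvs, the completion handler decrements it, and once the count drops below sc_max_requests it tops the pool up in batches of sc_recv_batch (the initial fill at line 313 is sized to sc_max_requests * 2 + sc_recv_batch). A toy sketch of that accounting is shown below; the constants and function names are hypothetical stand-ins.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_REQUESTS 64u    /* hypothetical steady-state number of posted receives */
    #define RECV_BATCH    8u    /* hypothetical refill batch size */

    static unsigned int pending_recvs;

    /* Post 'want' receive buffers; here we only track the count. */
    static bool refresh_recvs(unsigned int want)
    {
        pending_recvs += want;
        printf("posted %u receives, %u pending\n", want, pending_recvs);
        return true;
    }

    /* One receive completion: consume a buffer, refill in batches when low. */
    static void on_receive_completion(void)
    {
        pending_recvs--;
        if (pending_recvs < MAX_REQUESTS)
            refresh_recvs(RECV_BATCH);
    }

    int main(void)
    {
        refresh_recvs(MAX_REQUESTS * 2 + RECV_BATCH);   /* initial fill, as at line 313 */
        on_receive_completion();
        return 0;
    }
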
svc_rdma_rw.c
62 spin_lock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
64 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
138 ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num, in svc_rdma_rw_ctx_init()
144 svc_rdma_put_rw_ctxt(rdma, ctxt); in svc_rdma_rw_ctx_init()
160 svc_rdma_send_cid_init(rdma, cid); in svc_rdma_cc_init()
211 info->wi_rdma = rdma; in svc_rdma_write_info_alloc()
333 spin_lock(&rdma->sc_rq_dto_lock); in svc_rdma_wc_read_done()
338 svc_xprt_enqueue(&rdma->sc_xprt); in svc_rdma_wc_read_done()
400 wait_event(rdma->sc_send_wait, in svc_rdma_post_chunk_ctxt()
413 wake_up(&rdma->sc_send_wait); in svc_rdma_post_chunk_ctxt()
[all …]
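
Lines 138–144 show the get/init/put-on-error shape: a R/W context comes out of the pool, rdma_rw_ctx_init() is attempted on it, and a failure hands the context straight back before the error is propagated. A generic C sketch of that pattern, assuming invented helpers (get_rw_ctxt, rw_ctx_init) rather than the kernel API:

    #include <stdlib.h>

    struct rw_ctxt { int in_use; };

    static struct rw_ctxt *get_rw_ctxt(void)   { return calloc(1, sizeof(struct rw_ctxt)); }
    static void put_rw_ctxt(struct rw_ctxt *c) { free(c); }

    /* Stand-in for the init step; a negative return means failure. */
    static int rw_ctx_init(struct rw_ctxt *c)  { c->in_use = 1; return 0; }

    static struct rw_ctxt *setup_rw(void)
    {
        struct rw_ctxt *ctxt = get_rw_ctxt();

        if (!ctxt)
            return NULL;
        if (rw_ctx_init(ctxt) < 0) {
            /* error path from the listing: return the context, then fail */
            put_rw_ctxt(ctxt);
            return NULL;
        }
        return ctxt;
    }

    int main(void)
    {
        struct rw_ctxt *c = setup_rw();

        if (c)
            put_rw_ctxt(c);
        return 0;
    }
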
svc_rdma_transport.c
588 struct svcxprt_rdma *rdma = in svc_rdma_detach() local
596 struct svcxprt_rdma *rdma = in __svc_rdma_free() local
601 if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) in __svc_rdma_free()
602 ib_drain_qp(rdma->sc_qp); in __svc_rdma_free()
612 if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) in __svc_rdma_free()
615 if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq)) in __svc_rdma_free()
618 if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq)) in __svc_rdma_free()
621 if (rdma->sc_pd && !IS_ERR(rdma->sc_pd)) in __svc_rdma_free()
629 kfree(rdma); in __svc_rdma_free()
634 struct svcxprt_rdma *rdma = in svc_rdma_free() local
[all …]
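
__svc_rdma_free() drains the QP before tearing anything down, then destroys the QP, the CQs, and finally the PD, guarding each step with a NULL/IS_ERR test so a half-constructed transport can still be freed safely. The following userspace imitation of those guards uses toy types and a simplified is_err() that mimics the kernel's errno-encoded-in-pointer convention; it is an illustration, not the svc_rdma code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mimic of the kernel convention: the top 4095 pointer values encode errnos. */
    #define MAX_ERRNO 4095
    static bool is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    struct fake_qp { int id; };
    struct fake_pd { int id; };

    static void destroy_qp(struct fake_qp *qp) { printf("qp %d destroyed\n", qp->id); }
    static void dealloc_pd(struct fake_pd *pd) { printf("pd %d freed\n", pd->id); }

    /* Tear down in the listing's order, skipping anything that was never
     * created (NULL) or whose creation failed (error-encoded pointer). */
    static void transport_free(struct fake_qp *qp, struct fake_pd *pd)
    {
        if (qp && !is_err(qp))
            destroy_qp(qp);
        if (pd && !is_err(pd))
            dealloc_pd(pd);
    }

    int main(void)
    {
        struct fake_qp qp = { .id = 1 };

        transport_free(&qp, NULL);   /* the PD was never allocated: silently skipped */
        return 0;
    }
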
svc_rdma_backchannel.c
75 static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, in svc_rdma_bc_sendto() argument
83 ret = svc_rdma_map_reply_msg(rdma, sctxt, &empty_pcl, &empty_pcl, in svc_rdma_bc_sendto()
93 return svc_rdma_post_send(rdma, sctxt); in svc_rdma_bc_sendto()
136 rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) in rpcrdma_bc_send_request() argument
144 ctxt = svc_rdma_send_ctxt_get(rdma); in rpcrdma_bc_send_request()
160 rc = svc_rdma_bc_sendto(rdma, rqst, ctxt); in rpcrdma_bc_send_request()
166 svc_rdma_send_ctxt_put(rdma, ctxt); in rpcrdma_bc_send_request()
183 struct svcxprt_rdma *rdma = in xprt_rdma_bc_send_request() local
190 ret = rpcrdma_bc_send_request(rdma, rqst); in xprt_rdma_bc_send_request()
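rpcrdma_bc_send_request() gets a send context, maps the reply, posts it, and puts the context back only on failure; on success the completion path owns it. A compact sketch of that goto-style unwind follows, with every name invented for the example.

    #include <stdlib.h>

    struct send_ctxt { char buf[128]; };

    static struct send_ctxt *ctxt_get(void)   { return malloc(sizeof(struct send_ctxt)); }
    static void ctxt_put(struct send_ctxt *c) { free(c); }
    static int map_reply(struct send_ctxt *c) { (void)c; return 0; }

    /* Stand-in for a successful post: ownership passes to the completion
     * handler, which eventually frees the context (here, immediately). */
    static int post_send(struct send_ctxt *c) { free(c); return 0; }

    /* Kernel-style unwind: any failure after ctxt_get() must put the context back. */
    static int bc_send_request(void)
    {
        struct send_ctxt *ctxt;
        int rc;

        ctxt = ctxt_get();
        if (!ctxt)
            return -1;

        rc = map_reply(ctxt);
        if (rc < 0)
            goto put_ctxt;

        rc = post_send(ctxt);
        if (rc < 0)
            goto put_ctxt;

        return 0;

    put_ctxt:
        ctxt_put(ctxt);
        return rc;
    }

    int main(void) { return bc_send_request() ? 1 : 0; }
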
/net/rds/
send.c
280 (rm->rdma.op_active && in rds_send_xmit()
309 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) { in rds_send_xmit()
310 rm->m_final_op = &rm->rdma; in rds_send_xmit()
315 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); in rds_send_xmit()
355 if (rm->rdma.op_active && !rm->rdma.op_silent) in rds_send_xmit()
508 ro = &rm->rdma; in rds_rdma_send_complete()
579 ro = &rm->rdma; in __rds_send_complete()
645 struct rm_rdma_op *ro = &rm->rdma; in rds_send_remove_from_sock()
658 rm->rdma.op_notifier = NULL; in rds_send_remove_from_sock()
1318 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { in rds_sendmsg()
[all …]
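
In RDS the RDMA operation is embedded in the message (rm->rdma) with an op_active flag, and the connection path records whether it has already been transmitted (cp_xmit_rdma_sent), so rds_send_xmit() sends it at most once per pass. A condensed sketch of that once-per-path check, with hypothetical struct and function names:

    #include <stdbool.h>
    #include <stdio.h>

    struct rdma_op   { bool op_active; };       /* embedded optional RDMA op */
    struct message   { struct rdma_op rdma; };
    struct conn_path { bool xmit_rdma_sent; };  /* per-path "already sent" state */

    static int xmit_rdma(struct rdma_op *op)
    {
        (void)op;
        printf("rdma op transmitted\n");
        return 0;
    }

    /* Transmit the RDMA part of a message exactly once per connection path. */
    static int send_xmit(struct conn_path *cp, struct message *rm)
    {
        if (rm->rdma.op_active && !cp->xmit_rdma_sent) {
            int ret = xmit_rdma(&rm->rdma);

            if (ret)
                return ret;
            cp->xmit_rdma_sent = true;
        }
        return 0;
    }

    int main(void)
    {
        struct message   rm = { .rdma = { .op_active = true } };
        struct conn_path cp = { 0 };

        send_xmit(&cp, &rm);    /* sends the op */
        send_xmit(&cp, &rm);    /* no-op: already sent on this path */
        return 0;
    }
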
message.c
162 if (rm->rdma.op_active) in rds_message_purge()
163 rds_rdma_free_op(&rm->rdma); in rds_message_purge()
164 if (rm->rdma.op_rdma_mr) in rds_message_purge()
165 kref_put(&rm->rdma.op_rdma_mr->r_kref, __rds_put_mr_final); in rds_message_purge()
ib_send.c
114 rds_ib_send_complete(container_of(op, struct rds_message, rdma), in rds_ib_send_unmap_rdma()
167 rm = container_of(send->s_op, struct rds_message, rdma); in rds_ib_send_unmap_op()
579 if (rm->rdma.op_active) { in rds_ib_xmit()
582 ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey); in rds_ib_xmit()
615 if (rm->rdma.op_active && rm->rdma.op_fence) in rds_ib_xmit()
975 rds_message_addref(container_of(op, struct rds_message, rdma)); in rds_ib_xmit_rdma()
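
Two of these hits use container_of() to walk from the embedded rm->rdma operation back to the enclosing struct rds_message. A self-contained userspace example of the same trick, with a local container_of macro and toy types standing in for the RDS structures:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace copy of the kernel's container_of(): recover the address of
     * the enclosing structure from the address of one of its members. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rdma_op { int rkey; };

    struct message {
        int id;
        struct rdma_op rdma;    /* embedded op, as in struct rds_message */
    };

    static void complete(struct rdma_op *op)
    {
        /* Same move as the listing: walk back from the embedded op to the message. */
        struct message *rm = container_of(op, struct message, rdma);

        printf("completing message %d (rkey %d)\n", rm->id, op->rkey);
    }

    int main(void)
    {
        struct message rm = { .id = 7, .rdma = { .rkey = 42 } };

        complete(&rm.rdma);
        return 0;
    }
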
Makefile
5 loop.o page.o rdma.o
rdma.c
616 struct rm_rdma_op *op = &rm->rdma; in rds_cmsg_rdma_args()
626 || rm->rdma.op_active) in rds_cmsg_rdma_args()
843 rm->rdma.op_rdma_mr = mr; in rds_cmsg_rdma_dest()
862 &rm->rdma.op_rdma_mr, rm->m_conn_path); in rds_cmsg_rdma_map()
recv.c
172 struct rds_ext_header_rdma rdma; in rds_recv_incoming_exthdrs() member
184 rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0); in rds_recv_incoming_exthdrs()
rds.h
475 } rdma; member
