Home
last modified time | relevance | path

Searched refs:wc (Results 1 – 17 of 17) sorted by relevance

/net/sunrpc/xprtrdma/
frwr_ops.c  361  struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_fastreg()
365 trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid); in frwr_wc_fastreg()
444 if (likely(wc->status == IB_WC_SUCCESS)) in frwr_mr_done()
456 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv()
460 trace_xprtrdma_wc_li(wc, &mr->mr_cid); in frwr_wc_localinv()
461 frwr_mr_done(wc, mr); in frwr_wc_localinv()
475 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv_wake()
479 trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid); in frwr_wc_localinv_wake()
480 frwr_mr_done(wc, mr); in frwr_wc_localinv_wake()
572 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv_done()
[all …]
svc_rdma_rw.c  257  struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_reply_done()
262 switch (wc->status) { in svc_rdma_reply_done()
270 trace_svcrdma_wc_reply_err(wc, &cc->cc_cid); in svc_rdma_reply_done()
286 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_write_done()
292 switch (wc->status) { in svc_rdma_write_done()
300 trace_svcrdma_wc_write_err(wc, &cc->cc_cid); in svc_rdma_write_done()
305 if (unlikely(wc->status != IB_WC_SUCCESS)) in svc_rdma_write_done()
320 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_read_done()
328 switch (wc->status) { in svc_rdma_wc_read_done()
341 trace_svcrdma_wc_read_flush(wc, &cc->cc_cid); in svc_rdma_wc_read_done()
[all …]
verbs.c  132  if (wc->status != IB_WC_SUCCESS) in rpcrdma_flush_disconnect()
144 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_send()
150 trace_xprtrdma_wc_send(wc, &sc->sc_cid); in rpcrdma_wc_send()
152 rpcrdma_flush_disconnect(r_xprt, wc); in rpcrdma_wc_send()
163 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_receive()
169 trace_xprtrdma_wc_receive(wc, &rep->rr_cid); in rpcrdma_wc_receive()
171 if (wc->status != IB_WC_SUCCESS) in rpcrdma_wc_receive()
176 rep->rr_wc_flags = wc->wc_flags; in rpcrdma_wc_receive()
177 rep->rr_inv_rkey = wc->ex.invalidate_rkey; in rpcrdma_wc_receive()
181 wc->byte_len, DMA_FROM_DEVICE); in rpcrdma_wc_receive()
[all …]
svc_rdma_recvfrom.c  109  static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);
332 static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) in svc_rdma_wc_receive() argument
335 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_receive()
343 if (wc->status != IB_WC_SUCCESS) in svc_rdma_wc_receive()
345 trace_svcrdma_wc_recv(wc, &ctxt->rc_cid); in svc_rdma_wc_receive()
361 ctxt->rc_byte_len = wc->byte_len; in svc_rdma_wc_receive()
373 if (wc->status == IB_WC_WR_FLUSH_ERR) in svc_rdma_wc_receive()
374 trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid); in svc_rdma_wc_receive()
376 trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid); in svc_rdma_wc_receive()
svc_rdma_sendto.c  114  static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
305 static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) in svc_rdma_wc_send() argument
308 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_send()
314 if (unlikely(wc->status != IB_WC_SUCCESS)) in svc_rdma_wc_send()
322 if (wc->status != IB_WC_WR_FLUSH_ERR) in svc_rdma_wc_send()
323 trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid); in svc_rdma_wc_send()
325 trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid); in svc_rdma_wc_send()
xprt_rdma.h  466  void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
/net/smc/
smc_wr.c  83  link = wc->qp->qp_context; in smc_wr_tx_process_cqe()
85 if (wc->opcode == IB_WC_REG_MR) { in smc_wr_tx_process_cqe()
86 if (wc->status) in smc_wr_tx_process_cqe()
121 if (wc->status) { in smc_wr_tx_process_cqe()
146 memset(&wc, 0, sizeof(wc)); in smc_wr_tx_tasklet_fn()
156 smc_wr_tx_process_cqe(&wc[i]); in smc_wr_tx_tasklet_fn()
440 temp_wr_id = wc->wr_id; in smc_wr_rx_demultiplex()
445 handler->handler(wc, wr_rx); in smc_wr_rx_demultiplex()
455 link = wc[i].qp->qp_context; in smc_wr_rx_process_cqes()
463 switch (wc[i].status) { in smc_wr_rx_process_cqes()
[all …]
smc_cdc.c  469  static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf) in smc_cdc_rx_handler() argument
471 struct smc_link *link = (struct smc_link *)wc->qp->qp_context; in smc_cdc_rx_handler()
477 if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved)) in smc_cdc_rx_handler()
smc_llc.c  2094  static void smc_llc_rx_handler(struct ib_wc *wc, void *buf) in smc_llc_rx_handler() argument
2096 struct smc_link *link = (struct smc_link *)wc->qp->qp_context; in smc_llc_rx_handler()
2099 if (wc->byte_len < sizeof(*llc)) in smc_llc_rx_handler()
/net/rds/
ib_cm.c  263  struct ib_wc *wc; in poll_scq() local
267 wc = wcs + i; in poll_scq()
269 (unsigned long long)wc->wr_id, wc->status, in poll_scq()
270 wc->byte_len, be32_to_cpu(wc->ex.imm_data)); in poll_scq()
273 wc->wr_id == RDS_IB_ACK_WR_ID) in poll_scq()
274 rds_ib_send_cqe_handler(ic, wc); in poll_scq()
276 rds_ib_mr_cqe_handler(ic, wc); in poll_scq()
308 struct ib_wc *wc; in poll_rcq() local
312 wc = wcs + i; in poll_rcq()
314 (unsigned long long)wc->wr_id, wc->status, in poll_rcq()
[all …]
ib_send.c  243  void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) in rds_ib_send_cqe_handler() argument
255 (unsigned long long)wc->wr_id, wc->status, in rds_ib_send_cqe_handler()
256 ib_wc_status_msg(wc->status), wc->byte_len, in rds_ib_send_cqe_handler()
257 be32_to_cpu(wc->ex.imm_data)); in rds_ib_send_cqe_handler()
260 if (wc->wr_id == RDS_IB_ACK_WR_ID) { in rds_ib_send_cqe_handler()
269 completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest); in rds_ib_send_cqe_handler()
276 rm = rds_ib_send_unmap_op(ic, send, wc->status); in rds_ib_send_cqe_handler()
303 if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) { in rds_ib_send_cqe_handler()
306 conn->c_tos, wc->status, in rds_ib_send_cqe_handler()
307 ib_wc_status_msg(wc->status), wc->vendor_err); in rds_ib_send_cqe_handler()
ib_recv.c  985  struct ib_wc *wc, in rds_ib_recv_cqe_handler() argument
992 (unsigned long long)wc->wr_id, wc->status, in rds_ib_recv_cqe_handler()
993 ib_wc_status_msg(wc->status), wc->byte_len, in rds_ib_recv_cqe_handler()
994 be32_to_cpu(wc->ex.imm_data)); in rds_ib_recv_cqe_handler()
1005 if (wc->status == IB_WC_SUCCESS) { in rds_ib_recv_cqe_handler()
1006 rds_ib_process_recv(conn, recv, wc->byte_len, state); in rds_ib_recv_cqe_handler()
1012 conn->c_tos, wc->status, in rds_ib_recv_cqe_handler()
1013 ib_wc_status_msg(wc->status), in rds_ib_recv_cqe_handler()
1014 wc->vendor_err); in rds_ib_recv_cqe_handler()
ib_frmr.c  322  void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) in rds_ib_mr_cqe_handler() argument
324 struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id; in rds_ib_mr_cqe_handler()
327 if (wc->status != IB_WC_SUCCESS) { in rds_ib_mr_cqe_handler()
334 wc->status, in rds_ib_mr_cqe_handler()
335 ib_wc_status_msg(wc->status), in rds_ib_mr_cqe_handler()
336 wc->vendor_err); in rds_ib_mr_cqe_handler()
ib.h  389  void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
400 void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
426 void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
/net/9p/
trans_rdma.c  290  recv_done(struct ib_cq *cq, struct ib_wc *wc) in recv_done() argument
295 container_of(wc->wr_cqe, struct p9_rdma_context, cqe); in recv_done()
304 if (wc->status != IB_WC_SUCCESS) in recv_done()
307 c->rc.size = wc->byte_len; in recv_done()
334 req, err, wc->status); in recv_done()
341 send_done(struct ib_cq *cq, struct ib_wc *wc) in send_done() argument
346 container_of(wc->wr_cqe, struct p9_rdma_context, cqe); in send_done()
/net/sched/
sch_dualpi2.c  209  struct dualpi2_sched_data *q, u32 wc) in dualpi2_calculate_c_protection() argument
211 q->c_protection_wc = wc; in dualpi2_calculate_c_protection()
212 q->c_protection_wl = MAX_WC - wc; in dualpi2_calculate_c_protection()
853 u8 wc = nla_get_u8(tb[TCA_DUALPI2_C_PROTECTION]); in dualpi2_change() local
855 dualpi2_calculate_c_protection(sch, q, wc); in dualpi2_change()
/net/netfilter/
nfnetlink_osf.c  81  if (f->wss.wc >= OSF_WSS_MAX) in nf_osf_match_one()
95 check_WSS = f->wss.wc; in nf_osf_match_one()

Completed in 55 milliseconds