| /net/9p/ |
| trans_rdma.c |
     79  struct ib_cq *cq;    member
    290  recv_done(struct ib_cq *cq, struct ib_wc *wc)    in recv_done() argument
    292  struct p9_client *client = cq->cq_context;    in recv_done()
    341  send_done(struct ib_cq *cq, struct ib_wc *wc)    in send_done() argument
    343  struct p9_client *client = cq->cq_context;    in send_done()
    373  if (rdma->cq && !IS_ERR(rdma->cq))    in rdma_destroy_trans()
    374  ib_free_cq(rdma->cq);    in rdma_destroy_trans()
    697  rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,    in rdma_create_trans()
    700  if (IS_ERR(rdma->cq))    in rdma_create_trans()
    718  qp_attr.send_cq = rdma->cq;    in rdma_create_trans()
    [all …]
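The trans_rdma.c hits trace the usual verbs-layer CQ lifecycle: the completion queue is allocated with ib_alloc_cq_any() while the transport is created, wired into the QP through qp_attr.send_cq, and released with ib_free_cq() on teardown, while recv_done()/send_done() recover the 9p client from cq->cq_context. Below is a minimal sketch of that pattern; my_conn, my_recv_done, my_setup_cq and my_destroy_cq are illustrative names standing in for the p9_trans_rdma structures, and the IB_POLL_SOFTIRQ context and ring sizing are assumptions, not a statement of what trans_rdma.c does beyond what the hits show.

```c
#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-connection state; trans_rdma.c keeps this in
 * struct p9_trans_rdma and passes the p9_client as the CQ context. */
struct my_conn {
	struct ib_device *device;
	struct ib_cq *cq;
};

/* Receive completion: the private pointer handed to ib_alloc_cq_any()
 * comes back as cq->cq_context. */
static void my_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_conn *conn = cq->cq_context;

	if (wc->status != IB_WC_SUCCESS)
		pr_err("recv completion failed: %d\n", wc->status);
	/* ... hand the received buffer up to the protocol layer ... */
	(void)conn;
}

static int my_setup_cq(struct my_conn *conn, int sq_depth, int rq_depth)
{
	/* One CQ covers both rings; it is later attached to the QP via
	 * qp_attr.send_cq / qp_attr.recv_cq (see the line 718 hit). */
	conn->cq = ib_alloc_cq_any(conn->device, conn,
				   sq_depth + rq_depth + 1, IB_POLL_SOFTIRQ);
	return IS_ERR(conn->cq) ? PTR_ERR(conn->cq) : 0;
}

static void my_destroy_cq(struct my_conn *conn)
{
	if (conn->cq && !IS_ERR(conn->cq))
		ib_free_cq(conn->cq);
}
```

In the real transport the completion handlers are reached through wc->wr_cqe rather than being registered on the CQ itself; the sketch only shows the allocation and context-recovery halves of the pattern.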
|
| /net/sunrpc/ |
| cache.c |
    980  struct cache_queue *cq;    in cache_poll() local
    992  for (cq= &rp->q; &cq->list != &cd->queue;    in cache_poll()
    993  cq = list_entry(cq->list.next, struct cache_queue, list))    in cache_poll()
    994  if (!cq->reader) {    in cache_poll()
   1018  for (cq= &rp->q; &cq->list != &cd->queue;    in cache_ioctl()
   1019  cq = list_entry(cq->list.next, struct cache_queue, list))    in cache_ioctl()
   1020  if (!cq->reader) {    in cache_ioctl()
   1067  for (cq= &rp->q; &cq->list != &cd->queue;    in cache_release()
   1068  cq = list_entry(cq->list.next, struct cache_queue, list))    in cache_release()
   1069  if (!cq->reader) {    in cache_release()
   [all …]
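Every cache.c hit is the same open-coded walk: starting from the reader's own cache_queue node (rp->q), follow cd->queue with list_entry() until the list head comes back around, skipping entries whose ->reader flag is set so that only queued upcall requests are examined. A small sketch of that walk follows, assuming simplified structure layouts; struct my_channel and first_request_after() are illustrative names, not from cache.c.

```c
#include <linux/list.h>

/* Simplified shapes of what cache.c iterates over: each open reader and each
 * pending upcall request sits on the same list, distinguished by ->reader. */
struct cache_queue {
	struct list_head list;
	int reader;		/* 1: a reader's placeholder, 0: a queued request */
};

struct my_channel {
	struct list_head queue;	/* stand-in for the cache_detail queue head */
};

/* Return the first pending request at or after the reader's node 'rq',
 * or NULL once the walk wraps back around to the list head. */
static struct cache_queue *first_request_after(struct my_channel *cd,
					       struct cache_queue *rq)
{
	struct cache_queue *cq;

	for (cq = rq; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader)
			return cq;
	return NULL;
}
```

cache_poll(), cache_ioctl() and cache_release() each repeat this walk relative to the reader's own position in the channel queue.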
|
| /net/sunrpc/xprtrdma/ |
| frwr_ops.c |
    359  static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)    in frwr_wc_fastreg() argument
    367  rpcrdma_flush_disconnect(cq->cq_context, wc);    in frwr_wc_fastreg()
    454  static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)    in frwr_wc_localinv() argument
    463  rpcrdma_flush_disconnect(cq->cq_context, wc);    in frwr_wc_localinv()
    473  static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)    in frwr_wc_localinv_wake() argument
    483  rpcrdma_flush_disconnect(cq->cq_context, wc);    in frwr_wc_localinv_wake()
    570  static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)    in frwr_wc_localinv_done() argument
    586  rpcrdma_flush_disconnect(cq->cq_context, wc);    in frwr_wc_localinv_done()
|
| svc_rdma_rw.c |
     17  static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
     18  static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
    255  static void svc_rdma_reply_done(struct ib_cq *cq, struct ib_wc *wc)    in svc_rdma_reply_done() argument
    260  struct svcxprt_rdma *rdma = cq->cq_context;    in svc_rdma_reply_done()
    283  static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)    in svc_rdma_write_done() argument
    285  struct svcxprt_rdma *rdma = cq->cq_context;    in svc_rdma_write_done()
    317  static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)    in svc_rdma_wc_read_done() argument
    319  struct svcxprt_rdma *rdma = cq->cq_context;    in svc_rdma_wc_read_done()
|
| svc_rdma_recvfrom.c |
    109  static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);
    332  static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)    in svc_rdma_wc_receive() argument
    334  struct svcxprt_rdma *rdma = cq->cq_context;    in svc_rdma_wc_receive()
|
| svc_rdma_sendto.c |
    114  static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
    305  static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)    in svc_rdma_wc_send() argument
    307  struct svcxprt_rdma *rdma = cq->cq_context;    in svc_rdma_wc_send()
|
| verbs.c |
    142  static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)    in rpcrdma_wc_send() argument
    147  struct rpcrdma_xprt *r_xprt = cq->cq_context;    in rpcrdma_wc_send()
    161  static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)    in rpcrdma_wc_receive() argument
    166  struct rpcrdma_xprt *r_xprt = cq->cq_context;    in rpcrdma_wc_receive()
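All of the xprtrdma handlers listed above (frwr_wc_*, svc_rdma_wc_*, rpcrdma_wc_send/receive) share one shape: the callback is reached through an ib_cqe embedded in the posted work request, and the owning transport is recovered from cq->cq_context so a failed or flushed completion can trigger disconnect handling. A hedged sketch of that wiring follows; my_xprt, my_send_ctx and my_wc_send are illustrative names, and the disconnect step is only indicated by a comment where xprtrdma would call rpcrdma_flush_disconnect().

```c
#include <rdma/ib_verbs.h>

struct my_xprt {
	bool connected;		/* transport state; cq->cq_context points here */
};

/* Per-WR context: embedding an ib_cqe lets the CQ core route the
 * completion back to my_wc_send() via wc->wr_cqe->done. */
struct my_send_ctx {
	struct ib_cqe cqe;
	/* ... DMA-mapped buffers, the originating request, etc. ... */
};

static void my_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_send_ctx *ctx =
		container_of(wc->wr_cqe, struct my_send_ctx, cqe);
	struct my_xprt *xprt = cq->cq_context;

	if (wc->status != IB_WC_SUCCESS)
		xprt->connected = false;	/* xprtrdma would flush/disconnect here */

	/* ... release ctx's resources ... */
	(void)ctx;
}

static int my_post_send(struct ib_qp *qp, struct my_send_ctx *ctx,
			struct ib_sge *sge)
{
	struct ib_send_wr wr = {
		.wr_cqe		= &ctx->cqe,
		.sg_list	= sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};

	ctx->cqe.done = my_wc_send;	/* completion routes to my_wc_send() */
	return ib_post_send(qp, &wr, NULL);
}
```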
|
| /net/rds/ |
| ib_cm.c |
    247  static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)    in rds_ib_cq_comp_handler_recv() argument
    252  rdsdebug("conn %p cq %p\n", conn, cq);    in rds_ib_cq_comp_handler_recv()
    259  static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,    in poll_scq() argument
    265  while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {    in poll_scq()
    303  static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,    in poll_rcq() argument
    310  while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {    in poll_rcq()
    375  static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)    in rds_ib_cq_comp_handler_send() argument
    380  rdsdebug("conn %p cq %p\n", conn, cq);    in rds_ib_cq_comp_handler_send()
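Unlike the other RDMA users above, RDS drives its CQs with the older interrupt-plus-poll scheme: rds_ib_cq_comp_handler_send()/_recv() only note the event and defer the real work, and poll_scq()/poll_rcq() then drain the queue with ib_poll_cq() in batches of RDS_IB_WC_MAX work completions. A minimal sketch of such a drain loop follows; MY_WC_BATCH and my_drain_cq are illustrative, and as in ib_cm.c the caller is assumed to supply a long-lived completion array rather than putting it on the stack.

```c
#include <rdma/ib_verbs.h>

#define MY_WC_BATCH 32	/* stands in for RDS_IB_WC_MAX; the value is illustrative */

/* Drain a CQ the way poll_scq()/poll_rcq() do: ib_poll_cq() reaps up to
 * MY_WC_BATCH work completions per call and returns how many it filled in;
 * 0 (or a negative error) ends the loop.  'wcs' must point at at least
 * MY_WC_BATCH entries kept off the stack, as RDS keeps them in its
 * per-connection state. */
static void my_drain_cq(struct ib_cq *cq, struct ib_wc *wcs)
{
	int nr, i;

	while ((nr = ib_poll_cq(cq, MY_WC_BATCH, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			struct ib_wc *wc = &wcs[i];

			if (wc->status != IB_WC_SUCCESS)
				pr_warn("wr_id %llu failed: %d\n",
					(unsigned long long)wc->wr_id,
					wc->status);
			/* ... complete the send or hand off the receive ... */
		}
	}
}
```

ib_cm.c pairs this drain with ib_req_notify_cq() and one more polling pass, so a completion that races with re-arming the CQ is not lost.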
|
| /net/xdp/ |
| xsk_buff_pool.c |
    101  pool->cq = xs->cq_tmp;    in xp_create_and_assign_umem()
    258  if (!pool->fq || !pool->cq)    in xp_assign_dev_shared()
    297  if (pool->cq) {    in xp_release_deferred()
    298  xskq_destroy(pool->cq);    in xp_release_deferred()
    299  pool->cq = NULL;    in xp_release_deferred()
|
| xsk_diag.c |
     73  if (!err && pool && pool->cq)    in xsk_diag_put_umem()
     74  err = xsk_diag_put_ring(pool->cq,    in xsk_diag_put_umem()
|
| xsk.c |
    408  xskq_prod_submit_n(pool->cq, nb_entries);    in xsk_tx_completed()
    449  if (xskq_prod_reserve_addr(pool->cq, desc->addr))    in xsk_tx_peek_desc()
    508  nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);    in xsk_tx_peek_release_desc_batch()
    519  xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);    in xsk_tx_peek_release_desc_batch()
    541  ret = xskq_prod_reserve_addr(pool->cq, addr);    in xsk_cq_reserve_addr_locked()
    552  xskq_prod_submit_n(pool->cq, n);    in xsk_cq_submit_locked()
    561  xskq_prod_cancel_n(pool->cq, n);    in xsk_cq_cancel_locked()
   1641  READ_ONCE(xs->pool->cq);    in xsk_mmap()
    [all …]
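In /net/xdp/, cq is not an RDMA object at all: pool->cq is the AF_XDP completion ring. The xsk.c hits show its producer side: xsk_tx_peek_desc() reserves a completion-ring slot for each frame it hands out (the line 449 hit), and xsk_tx_completed() publishes finished frames back to user space via xskq_prod_submit_n() (the line 408 hit). Those xskq_* helpers are internal to net/xdp/, so the sketch below stays on the driver-facing API that wraps them; my_dev, my_xsk_tx and my_xsk_tx_clean are illustrative names, not from any driver.

```c
#include <net/xdp_sock_drv.h>

struct my_dev {
	struct xsk_buff_pool *pool;	/* bound AF_XDP buffer pool */
};

/* Zero-copy TX: each descriptor peeked here already has a slot reserved on
 * the completion ring (pool->cq), per the xsk_tx_peek_desc() hit above. */
static u32 my_xsk_tx(struct my_dev *dev, u32 budget)
{
	struct xdp_desc desc;
	u32 sent = 0;

	while (sent < budget && xsk_tx_peek_desc(dev->pool, &desc)) {
		dma_addr_t dma = xsk_buff_raw_get_dma(dev->pool, desc.addr);

		/* hand (dma, desc.len) to the hardware TX ring here */
		(void)dma;
		sent++;
	}
	if (sent)
		xsk_tx_release(dev->pool);	/* release consumed TX descriptors */
	return sent;
}

/* TX clean-up: publish 'completed' frames to pool->cq; this is the
 * xskq_prod_submit_n() path behind the xsk.c line 408 hit. */
static void my_xsk_tx_clean(struct my_dev *dev, u32 completed)
{
	if (completed)
		xsk_tx_completed(dev->pool, completed);
}
```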
|
| /net/ipv4/ |
| ipconfig.c |
   1413  char *cp, *cq;    in root_nfs_parse_addr() local
   1415  cp = cq = name;    in root_nfs_parse_addr()
   1419  if (cp == cq || cp - cq > 3)    in root_nfs_parse_addr()
   1425  cq = cp;    in root_nfs_parse_addr()
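Finally, the ipconfig.c hits are not a queue at all: cp and cq are scanning pointers inside root_nfs_parse_addr(), where cq marks the start of the current dotted-quad component, cp runs over its digits, and the check `cp == cq || cp - cq > 3` rejects an empty or over-long component. A self-contained sketch of that two-pointer check follows; looks_like_dotted_quad() is an illustrative helper, and the real function goes on to split the address off the NFS path and convert the text to a binary address.

```c
#include <linux/ctype.h>

/* Two-pointer scan in the style of root_nfs_parse_addr(): cq marks the start
 * of the current component, cp walks over its digits, and a component is
 * rejected when it is empty or longer than three digits.  Returns 1 if
 * 'name' begins with a plausible a.b.c.d prefix, 0 otherwise. */
static int looks_like_dotted_quad(const char *name)
{
	const char *cp, *cq;
	int octets = 0;

	cp = cq = name;
	while (octets < 4) {
		while (isdigit(*cp))
			cp++;
		if (cp == cq || cp - cq > 3)	/* empty or more than 3 digits */
			return 0;
		octets++;
		if (octets < 4) {
			if (*cp != '.')
				return 0;
			cq = ++cp;	/* start of the next component */
		}
	}
	return 1;
}
```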
|