/linux-6.3-rc2/drivers/infiniband/hw/hfi1/
vnic_sdma.c
      29  struct sdma_txreq txreq;                                                 member
      41  struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);  in vnic_sdma_complete()
      44  sdma_txclean(vnic_sdma->dd, txreq);                                     in vnic_sdma_complete()
      56  &tx->txreq,                                                             in build_vnic_ulp_payload()
      67  &tx->txreq,                                                             in build_vnic_ulp_payload()
      92  &tx->txreq,                                                             in build_vnic_tx_desc()
     107  &tx->txreq,                                                             in build_vnic_tx_desc()
     155  &tx->txreq, vnic_sdma->pkts_sent);                                      in hfi1_vnic_send_dma()
     167  sdma_txclean(dd, &tx->txreq);                                           in hfi1_vnic_send_dma()
     187  struct sdma_txreq *txreq,                                               in hfi1_vnic_sdma_sleep() (argument)
     [all …]

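The vnic_sdma.c hits show the embedded-request idiom used throughout this directory: the driver wraps struct sdma_txreq inside its own struct vnic_txreq, hands only the inner field to the SDMA core, and recovers the wrapper in the completion callback via container_of(). Below is a minimal, self-contained sketch of that recovery; the types and the pkt_id field are toy stand-ins, not the hfi1 definitions.

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the kernel types; only the layout matters here. */
struct sdma_txreq { int status; };

struct vnic_txreq {
	struct sdma_txreq txreq;   /* embedded request, as at vnic_sdma.c:29 */
	int pkt_id;                /* hypothetical private state */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The completion side sees only the inner sdma_txreq pointer... */
static void vnic_sdma_complete(struct sdma_txreq *txreq)
{
	/* ...and climbs back to the wrapper, as vnic_sdma.c:41 does. */
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);

	printf("completed packet %d (status %d)\n", tx->pkt_id, tx->txreq.status);
}

int main(void)
{
	struct vnic_txreq tx = { .txreq = { .status = 0 }, .pkt_id = 42 };

	vnic_sdma_complete(&tx.txreq);   /* only the embedded field crosses the API */
	return 0;
}

Because container_of() is just a compile-time offset subtraction, the pattern adds no per-packet cost; the same recovery shows up in nearly every file below.
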
ipoib_tx.c
     190  struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);  in hfi1_ipoib_sdma_complete()
     203  struct sdma_txreq *txreq = &tx->txreq;                                    in hfi1_ipoib_build_ulp_payload() (local)
     218  txreq,                                                                    in hfi1_ipoib_build_ulp_payload()
     233  struct sdma_txreq *txreq = &tx->txreq;                                    in hfi1_ipoib_build_tx_desc() (local)
     245  txreq,                                                                    in hfi1_ipoib_build_tx_desc()
     376  tx->txreq.num_desc = 0;                                                   in hfi1_ipoib_send_dma_common()
     445  &tx->txreq,                                                               in hfi1_ipoib_submit_tx()
     614  struct sdma_txreq *txreq,                                                 in hfi1_ipoib_sdma_sleep() (argument)
     768  struct sdma_txreq *txreq;                                                 in hfi1_ipoib_drain_tx_list() (local)
     773  container_of(txreq, struct ipoib_txreq, txreq);                           in hfi1_ipoib_drain_tx_list()
     [all …]

verbs_txreq.h
      18  struct sdma_txreq txreq;                                   member
      53  tx->txreq.num_desc = 0;                                    in get_txreq()
      56  tx->txreq.flags = 0;                                       in get_txreq()
      66  return container_of(stx, struct verbs_txreq, txreq);      in get_waiting_verbs_txreq()

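verbs_txreq.h shows the allocation side of the same idiom: get_txreq() resets the embedded request (num_desc = 0, flags = 0) before handing it out, so a recycled wrapper never carries stale descriptor state, and get_waiting_verbs_txreq() climbs back up with container_of(). A hedged sketch of that reset step, with a toy pool argument in place of the real slab-backed allocator:

/* Toy layouts; the real definitions live in hfi1's sdma.h/verbs_txreq.h. */
struct sdma_txreq {
	unsigned short num_desc;
	unsigned short flags;
};

struct verbs_txreq {
	struct sdma_txreq txreq;        /* member, as at verbs_txreq.h:18 */
};

/* Mirrors get_txreq(): whatever pool the wrapper came from, scrub the
 * reused descriptor state before building a fresh packet on it. */
static struct verbs_txreq *get_txreq(struct verbs_txreq *from_pool)
{
	struct verbs_txreq *tx = from_pool;     /* assumed pool lookup */

	if (tx) {
		tx->txreq.num_desc = 0;         /* verbs_txreq.h:53 */
		tx->txreq.flags = 0;            /* verbs_txreq.h:56 */
	}
	return tx;
}

int main(void)
{
	struct verbs_txreq slot = { .txreq = { .num_desc = 7, .flags = 3 } };

	return get_txreq(&slot)->txreq.num_desc;   /* 0: stale state scrubbed */
}

Scrubbing at allocation time rather than on the free path keeps the completion callback short, which is the hot side of this structure.
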
user_sdma.c
      64  struct sdma_txreq *txreq,
      87  struct sdma_txreq *txreq,                                  in defer_packet_queue() (argument)
      96  if (sdma_progress(sde, seq, txreq))                        in defer_packet_queue()
     688  ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,   in user_sdma_txadd_ahg()
     695  sdma_txclean(pq->dd, &tx->txreq);                          in user_sdma_txadd_ahg()
     851  ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +        in user_sdma_send_pkts()
     887  list_add_tail(&tx->txreq.list, &req->txps);                in user_sdma_send_pkts()
     914  sdma_txclean(pq->dd, &tx->txreq);                          in user_sdma_send_pkts()
    1342  sdma_txinit_ahg(&tx->txreq,                                in set_txreq_header_ahg()
    1364  container_of(txreq, struct user_sdma_txreq, txreq);        in user_sdma_txreq_cb()
     [all …]

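The user_sdma.c hits trace the request lifecycle: sdma_txinit() on entry, list_add_tail() of tx->txreq.list onto the per-request packet list on success, and sdma_txclean() on every failure path (user_sdma.c:695, :914). The shape of that error handling, sketched with stub functions; the real hfi1 calls take the device pointer and several more arguments:

#include <errno.h>
#include <stdio.h>

/* Stubs standing in for the hfi1 SDMA core; signatures are simplified. */
struct sdma_txreq { int n; };

static int sdma_txinit(struct sdma_txreq *tx)     { tx->n = 0; return 0; }
static int sdma_txadd_page(struct sdma_txreq *tx) { return tx->n++ < 4 ? 0 : -ENOMEM; }
static void sdma_txclean(struct sdma_txreq *tx)   { tx->n = 0; }

/* Mirrors the user_sdma_send_pkts() shape: init, add descriptors,
 * and unwind with sdma_txclean() on any failure. */
static int build_one_packet(struct sdma_txreq *tx, int ndesc)
{
	int i, ret;

	ret = sdma_txinit(tx);
	if (ret)
		return ret;

	for (i = 0; i < ndesc; i++) {
		ret = sdma_txadd_page(tx);
		if (ret)
			goto free_tx;
	}
	return 0;

free_tx:
	sdma_txclean(tx);   /* never leak a partially built descriptor list */
	return ret;
}

int main(void)
{
	struct sdma_txreq tx;

	printf("3 descs: %d\n", build_one_packet(&tx, 3));  /* 0 */
	printf("9 descs: %d\n", build_one_packet(&tx, 9));  /* -ENOMEM (-12) */
	return 0;
}
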
verbs.c
     590  container_of(cookie, struct verbs_txreq, txreq);                      in verbs_sdma_complete()
     637  list_add_tail(&ps->s_txreq->txreq.list,                               in wait_kmem()
     671  &tx->txreq,                                                           in build_verbs_ulp_payload()
     740  &tx->txreq,                                                           in build_verbs_tx_desc()
     754  &tx->txreq,                                                           in build_verbs_tx_desc()
     761  &tx->txreq,                                                           in build_verbs_tx_desc()
     781  ret = sdma_txadd_daddr(sde->dd, &tx->txreq,                           in build_verbs_tx_desc()
     823  if (!sdma_txreq_built(&tx->txreq)) {                                  in hfi1_verbs_send_dma()
     852  ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);  in hfi1_verbs_send_dma()
     900  list_add_tail(&ps->s_txreq->txreq.list,                               in pio_wait()
     [all …]

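verbs.c combines both halves: build_verbs_tx_desc() fills the descriptor list, hfi1_verbs_send_dma() submits it with sdma_send_txreq(), and the wait_kmem()/pio_wait() hits show the request's list member being parked on a wait queue when resources run out. A toy rendering of that submit-or-queue flow, with a two-slot fake ring and a hand-rolled list_head so the sketch stands alone:

#include <errno.h>
#include <stdio.h>

/* Minimal list_head so the sketch is self-contained. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct sdma_txreq {
	struct list_head list;   /* queued on a wait list when the ring is full */
};

/* Stub engine: pretend the descriptor ring has room for two requests. */
static int ring_space = 2;

static int sdma_send_txreq(struct sdma_txreq *tx)
{
	(void)tx;
	return ring_space-- > 0 ? 0 : -EBUSY;
}

/* Mirrors the hfi1_verbs_send_dma()/pio_wait() split: submit if possible,
 * otherwise park the request's list node, as verbs.c:900 does. */
static int send_or_wait(struct sdma_txreq *tx, struct list_head *waitq)
{
	int ret = sdma_send_txreq(tx);

	if (ret == -EBUSY)
		list_add_tail(&tx->list, waitq);  /* retried when space frees up */
	return ret;
}

int main(void)
{
	struct list_head waitq;
	struct sdma_txreq a, b, c;

	INIT_LIST_HEAD(&waitq);
	printf("%d %d %d\n", send_or_wait(&a, &waitq),
	       send_or_wait(&b, &waitq), send_or_wait(&c, &waitq));
	return 0;   /* prints: 0 0 -16 (third request queued) */
}
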
ipoib.h
      57  struct sdma_txreq txreq;    member

verbs_txreq.c
      27  sdma_txclean(dd_from_dev(dev), &tx->txreq);    in hfi1_put_txreq()

user_sdma.h
     198  struct sdma_txreq txreq;    member

qp.c
     118  container_of(tx, struct verbs_txreq, txreq));                            in flush_list_head()
     443  struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);  in iowait_sleep()

rc.c
     349  ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;    in make_rc_ack()

tid_rdma.c
    5318  ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;    in make_tid_rdma_ack()

/linux-6.3-rc2/drivers/net/xen-netback/
netback.c
     882  struct xen_netif_tx_request txreq;                            in xenvif_tx_build_gops() (local)
     911  tx_credit_exceeded(queue, txreq.size))                        in xenvif_tx_build_gops()
     914  queue->remaining_credit -= txreq.size;                        in xenvif_tx_build_gops()
     921  if (txreq.flags & XEN_NETTXF_extra_info) {                    in xenvif_tx_build_gops()
     957  XEN_NETBACK_TX_COPY_LEN : txreq.size;                         in xenvif_tx_build_gops()
     967  if (unlikely(txreq.size < ETH_HLEN)) {                        in xenvif_tx_build_gops()
     969  "Bad packet size: %d\n", txreq.size);                         in xenvif_tx_build_gops()
     975  if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {  in xenvif_tx_build_gops()
     978  txreq.offset, txreq.size,                                     in xenvif_tx_build_gops()
     979  (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);   in xenvif_tx_build_gops()
     [all …]

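The xen-netback hits are a different animal: here txreq is a struct xen_netif_tx_request copied from a guest-shared ring, so every field is untrusted and xenvif_tx_build_gops() range-checks it before use (size at least ETH_HLEN, payload not crossing the shared page). A standalone sketch of those two checks; the constants are defined locally with their usual values, and only the two checked fields of the real struct are modeled:

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN      14       /* Ethernet header length */
#define XEN_PAGE_SIZE 4096u    /* one grant page */

/* Field subset of xen_netif_tx_request relevant to the checks. */
struct xen_netif_tx_request {
	uint16_t offset;   /* offset within the granted page */
	uint16_t size;     /* total packet size in bytes */
};

/* Mirrors the sanity checks at netback.c:967 and :975: reject runt
 * packets and payloads that would read past the shared page. */
static int txreq_sane(const struct xen_netif_tx_request *txreq)
{
	if (txreq->size < ETH_HLEN) {
		fprintf(stderr, "Bad packet size: %d\n", txreq->size);
		return 0;
	}
	if ((uint32_t)txreq->offset + txreq->size > XEN_PAGE_SIZE) {
		fprintf(stderr, "txreq overruns page: offset %d size %d\n",
			txreq->offset, txreq->size);
		return 0;
	}
	return 1;
}

int main(void)
{
	struct xen_netif_tx_request ok   = { .offset = 0,    .size = 64 };
	struct xen_netif_tx_request runt = { .offset = 0,    .size = 10 };
	struct xen_netif_tx_request over = { .offset = 4000, .size = 200 };

	printf("%d %d %d\n", txreq_sane(&ok), txreq_sane(&runt), txreq_sane(&over));
	return 0;   /* prints: 1 0 0 */
}

Widening offset + size to 32 bits before the comparison matters: both fields are 16-bit and a guest could otherwise pick values whose sum wraps.
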
/linux-6.3-rc2/drivers/infiniband/hw/qib/
qib_sdma.c
     496  tx->txreq.start_idx = 0;                                  in complete_sdma_err_req()
     497  tx->txreq.next_descq_idx = 0;                             in complete_sdma_err_req()
     498  list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);    in complete_sdma_err_req()
     547  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)       in qib_sdma_verbs_send()
     563  tx->txreq.start_idx = tail;                               in qib_sdma_verbs_send()
     580  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)       in qib_sdma_verbs_send()
     601  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)        in qib_sdma_verbs_send()
     603  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)            in qib_sdma_verbs_send()
     607  tx->txreq.next_descq_idx = tail;                          in qib_sdma_verbs_send()
     609  ppd->sdma_descq_added += tx->txreq.sg_count;              in qib_sdma_verbs_send()
     [all …]

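In the older qib driver the request's flags word steers how qib_sdma_verbs_send() programs the final descriptor: the QIB_SDMA_TXREQ_F_HEADTOHOST and QIB_SDMA_TXREQ_F_INTREQ tests at qib_sdma.c:601-603 each turn into a hardware control bit. A toy rendering of that flag-to-descriptor translation; both the flag values and the descriptor bit positions are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values; the real ones live in qib.h. */
#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
#define QIB_SDMA_TXREQ_F_HEADTOHOST  0x2
#define QIB_SDMA_TXREQ_F_INTREQ      0x4

/* Hypothetical descriptor control bits, for the sketch only. */
#define DESC_HEAD_TO_HOST (1ull << 62)
#define DESC_INTERRUPT    (1ull << 63)

struct qib_sdma_txreq { uint16_t flags; };

/* Mirrors the tail of qib_sdma_verbs_send(): translate request flags
 * into control bits on the final descriptor word. */
static uint64_t finish_descriptor(uint64_t desc, const struct qib_sdma_txreq *tx)
{
	if (tx->flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		desc |= DESC_HEAD_TO_HOST;
	if (tx->flags & QIB_SDMA_TXREQ_F_INTREQ)
		desc |= DESC_INTERRUPT;
	return desc;
}

int main(void)
{
	struct qib_sdma_txreq tx = { .flags = QIB_SDMA_TXREQ_F_INTREQ };

	printf("%#llx\n", (unsigned long long)finish_descriptor(0, &tx));
	return 0;   /* bit 63 set: interrupt requested on completion */
}
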
qib_verbs.c
     628  tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;     in qib_put_txreq()
     630  tx->txreq.addr, tx->hdr_dwords << 2,              in qib_put_txreq()
     638  list_add(&tx->txreq.list, &dev->txreq_free);      in qib_put_txreq()
     686  if (qpp->s_tx->txreq.sg_count > avail)            in qib_verbs_sdma_desc_avail()
     688  avail -= qpp->s_tx->txreq.sg_count;               in qib_verbs_sdma_desc_avail()
     804  tx->txreq.callback = sdma_complete;               in qib_verbs_send_dma()
     808  tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;        in qib_verbs_send_dma()
     828  tx->txreq.sg_count = ndesc;                       in qib_verbs_send_dma()
     829  tx->txreq.addr = dev->pio_hdrs_phys +             in qib_verbs_send_dma()
     851  tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;      in qib_verbs_send_dma()
     [all …]

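qib_verbs.c closes the loop: qib_put_txreq() clears QIB_SDMA_TXREQ_F_FREEBUF, unmaps the buffer, and returns the wrapper to dev->txreq_free, while qib_verbs_sdma_desc_avail() walks the waiting QPs and only wakes those whose txreq.sg_count fits in the newly freed descriptor space (qib_verbs.c:686-688). A sketch of that admission check over a plain array; the real code walks a linked list of waiting QPs:

#include <stdio.h>

/* Each waiting request needs sg_count descriptor slots. */
struct qib_sdma_txreq { unsigned sg_count; };

/* Mirrors qib_verbs_sdma_desc_avail(): wake waiters in order while
 * their descriptor needs fit into `avail`, stopping at the first
 * one that does not. */
static int wake_fitting(const struct qib_sdma_txreq *waiters, int n, unsigned avail)
{
	int woken = 0;

	while (woken < n) {
		if (waiters[woken].sg_count > avail)
			break;                   /* keep FIFO order: don't skip ahead */
		avail -= waiters[woken].sg_count;
		woken++;
	}
	return woken;
}

int main(void)
{
	const struct qib_sdma_txreq q[] = { {4}, {8}, {2} };

	/* 10 free slots: 4 fits (6 left), then 8 > 6 stops the scan. */
	printf("%d\n", wake_fitting(q, 3, 10));   /* prints 1 */
	return 0;
}

Stopping at the first non-fitting waiter rather than scanning past it preserves FIFO fairness: a small request queued behind a large one cannot starve it.
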
qib.h
     248  struct qib_sdma_txreq txreq;    member