/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_tx.c
    50   mlx5e_dma_get(sq, --sq->dma_fifo_pc);  in mlx5e_dma_unmap_wqe_err()
    426  if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {  in mlx5e_tx_check_stop()
    467  mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);  in mlx5e_txwqe_complete()
    599  pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);  in mlx5e_tx_mpwqe_session_complete()
    653  mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);  in mlx5e_sq_xmit_mpwqe()
    658  mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);  in mlx5e_sq_xmit_mpwqe()
    816  sqcc = sq->cc;  in mlx5e_poll_tx_cq()
    865  mlx5e_dump_error_cqe(&sq->cq, sq->sqn,  in mlx5e_poll_tx_cq()
    883  sq->cc = sqcc;  in mlx5e_poll_tx_cq()
    888  mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&  in mlx5e_poll_tx_cq()
    [all …]
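The hits above all revolve around the send queue's two free-running counters: `pc` (producer, advanced when the driver posts a WQE) and `cc` (consumer, advanced as the completion queue is polled). A minimal userspace sketch of that accounting, with illustrative names and sizes rather than the real mlx5 helpers:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SQ_SIZE 256	/* ring entries; must be a power of two */

/* Entries in flight = pc - cc (mod 2^16), so there is room for n
 * more WQEs iff in-flight <= SQ_SIZE - n. Mirrors the idea behind
 * mlx5e_wqc_has_room_for(), not its exact signature. */
static int sq_has_room_for(uint16_t cc, uint16_t pc, uint16_t n)
{
	return (uint16_t)(pc - cc) <= (uint16_t)(SQ_SIZE - n);
}

int main(void)
{
	uint16_t cc = 0, pc = 0;

	while (sq_has_room_for(cc, pc, 1))
		pc++;			/* post until the ring is full */
	assert((uint16_t)(pc - cc) == SQ_SIZE);

	cc += 10;			/* completions free 10 slots */
	printf("room for 10 now: %d\n", sq_has_room_for(cc, pc, 10));
	return 0;
}
```

Because both counters wrap naturally at 2^16, the subtraction stays correct across wraparound without any masking in the room check itself.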
|
en_txrx.c
    57   net_dim(&sq->dim, dim_sample);  in mlx5e_handle_tx_dim()
    74   struct mlx5_wq_cyc *wq = &sq->wq;  in mlx5e_trigger_irq()
    76   u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);  in mlx5e_trigger_irq()
    83   nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_trigger_irq()
    84   mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);  in mlx5e_trigger_irq()
    148  if (sq)  in mlx5e_napi_poll()
    207  mlx5e_handle_tx_dim(&c->sq[i]);  in mlx5e_napi_poll()
    208  mlx5e_cq_arm(&c->sq[i].cq);  in mlx5e_napi_poll()
    214  if (sq) {  in mlx5e_napi_poll()
    215  mlx5e_handle_tx_dim(sq);  in mlx5e_napi_poll()
    [all …]
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
    185  mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_xdpsq_get_next_pi()
    250  if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,  in mlx5e_xmit_xdp_frame_check_mpwqe()
    301  if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {  in mlx5e_xmit_xdp_frame_check()
    304  sq->stats->full++;  in mlx5e_xmit_xdp_frame_check()
    357  sq->pc++;  in mlx5e_xmit_xdp_frame()
    422  sqcc = sq->cc;  in mlx5e_poll_xdpsq_cq()
    448  mlx5e_dump_error_cqe(&sq->cq, sq->sqn,  in mlx5e_poll_xdpsq_cq()
    466  sq->cc = sqcc;  in mlx5e_poll_xdpsq_cq()
    479  while (sq->cc != sq->pc) {  in mlx5e_free_xdpsq_descs()
    483  ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);  in mlx5e_free_xdpsq_descs()
    [all …]
|
reporter_tx.c
    17   if (sq->cc == sq->pc)  in mlx5e_wait_for_sq_flush()
    25   sq->sqn, sq->cc, sq->pc);  in mlx5e_wait_for_sq_flush()
    32   WARN_ONCE(sq->cc != sq->pc,  in mlx5e_reset_txqsq_cc_pc()
    34   sq->sqn, sq->cc, sq->pc);  in mlx5e_reset_txqsq_cc_pc()
    35   sq->cc = 0;  in mlx5e_reset_txqsq_cc_pc()
    48   sq = ctx;  in mlx5e_tx_reporter_err_cqe_recover()
    105  sq = to_ctx->sq;  in mlx5e_tx_reporter_timeout_recover()
    379  struct mlx5e_txqsq *sq = &c->sq[tc];  in mlx5e_tx_reporter_diagnose() local
    509  struct mlx5e_txqsq *sq = &c->sq[tc];  in mlx5e_tx_reporter_dump_all_sqs() local
    569  to_ctx.sq = sq;  in mlx5e_reporter_tx_timeout()
    [all …]
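These health-reporter recovery paths hinge on the same counter pair: a queue is fully flushed only once `cc` has caught up with `pc`, and only then may both be reset to zero. A hedged sketch of the wait loop; the retry bound is illustrative, and the real code sleeps between polls and logs on timeout:

```c
#include <stdbool.h>
#include <stdint.h>

struct txqsq { volatile uint16_t cc; uint16_t pc; };

/* Poll until every posted WQE has completed, as in
 * mlx5e_wait_for_sq_flush(); real code msleep()s between polls
 * and prints sqn/cc/pc on timeout. */
static bool wait_for_sq_flush(struct txqsq *sq, unsigned int retries)
{
	while (retries--) {
		if (sq->cc == sq->pc)	/* drained: safe to reset cc = pc = 0 */
			return true;
		/* sleep here in real code */
	}
	return false;
}
```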
|
xdp.h
    52   void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
    54   void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
    55   void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
    99   static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)  in mlx5e_xmit_xdp_doorbell() argument
    101  if (sq->doorbell_cseg) {  in mlx5e_xmit_xdp_doorbell()
    102  mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);  in mlx5e_xmit_xdp_doorbell()
    103  sq->doorbell_cseg = NULL;  in mlx5e_xmit_xdp_doorbell()
    112  u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;  in mlx5e_xdp_get_inline_state()
    140  mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,  in mlx5e_xdp_mpwqe_add_dseg() argument
    144  struct mlx5e_tx_mpwqe *session = &sq->mpwqe;  in mlx5e_xdp_mpwqe_add_dseg()
    [all …]
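`mlx5e_xmit_xdp_doorbell()` (lines 99–103 above) shows the batched-doorbell idiom: each transmitted frame only records its control segment, and the expensive MMIO doorbell write happens once per batch. A schematic with stand-in types; the real notify is a `dma_wmb()` plus a write of the producer counter and control segment to the UAR doorbell page:

```c
#include <stddef.h>
#include <stdint.h>

struct xdpsq_sketch {
	void *doorbell_cseg;	/* ctrl segment of the last posted WQE */
	uint16_t pc;
};

/* Stand-in for mlx5e_notify_hw(). */
static void notify_hw(struct xdpsq_sketch *sq) { (void)sq; }

static void xmit_xdp_doorbell(struct xdpsq_sketch *sq)
{
	if (sq->doorbell_cseg) {	/* anything posted since the last ring? */
		notify_hw(sq);
		sq->doorbell_cseg = NULL;	/* re-armed by the next post */
	}
}
```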
|
txrx.h
    93   #define MLX5E_TX_FETCH_WQE(sq, pi) \  argument
    144  struct mlx5_wq_cyc *wq = &sq->wq;  in mlx5e_txqsq_get_next_pi()
    152  wi = &sq->db.wqe_info[pi];  in mlx5e_txqsq_get_next_pi()
    160  mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_txqsq_get_next_pi()
    162  sq->stats->nop += contig_wqebbs;  in mlx5e_txqsq_get_next_pi()
    199  struct mlx5_wq_cyc *wq = &sq->wq;  in mlx5e_icosq_get_next_pi()
    207  wi = &sq->db.wqe_info[pi];  in mlx5e_icosq_get_next_pi()
    216  mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_icosq_get_next_pi()
    254  return &sq->db.dma_fifo[i & sq->dma_fifo_mask];  in mlx5e_dma_get()
    261  struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);  in mlx5e_dma_push()
    [all …]
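`mlx5e_txqsq_get_next_pi()` (lines 144–162 above) handles a subtlety of these cyclic work queues: a multi-WQEBB WQE must occupy contiguous slots, so when fewer slots remain before the wrap than the WQE needs, the tail is burned with NOPs. A self-contained sketch of that logic, assuming a power-of-two ring; names are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define WQ_SIZE 8	/* basic blocks (WQEBBs); power of two */

static unsigned int ctr2ix(uint16_t ctr) { return ctr & (WQ_SIZE - 1); }

/* Return a producer index with at least n contiguous free blocks,
 * padding the ring tail with NOPs if the WQE would otherwise wrap. */
static unsigned int get_next_pi(uint16_t *pc, unsigned int n)
{
	unsigned int pi = ctr2ix(*pc);
	unsigned int contig = WQ_SIZE - pi;

	if (contig < n) {	/* would wrap: burn the tail with NOPs */
		*pc += contig;	/* each NOP consumes one block */
		pi = ctr2ix(*pc);	/* now 0, the start of the ring */
	}
	return pi;
}

int main(void)
{
	uint16_t pc = 6;	/* only 2 contiguous blocks left */
	printf("pi = %u (pc advanced to %u)\n", get_next_pi(&pc, 4), pc);
	return 0;
}
```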
|
/linux/tools/io_uring/
queue.c
    70   struct io_uring_sq *sq = &ring->sq;  in io_uring_submit() local
    81   if (*sq->khead != *sq->ktail) {  in io_uring_submit()
    86   if (sq->sqe_head == sq->sqe_tail)  in io_uring_submit()
    94   to_submit = sq->sqe_tail - sq->sqe_head;  in io_uring_submit()
    99   sq->array[ktail & mask] = sq->sqe_head & mask;  in io_uring_submit()
    102  sq->sqe_head++;  in io_uring_submit()
    117  *sq->ktail = ktail;  in io_uring_submit()
    143  struct io_uring_sq *sq = &ring->sq;  in io_uring_get_sqe() local
    150  if (next - sq->sqe_head > *sq->kring_entries)  in io_uring_get_sqe()
    153  sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];  in io_uring_get_sqe()
    [all …]
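The submit path visible here moves the application-private `sqe_head..sqe_tail` window into the kernel-visible `array[]` indirection table, then publishes the new tail. A compilable sketch of the same bookkeeping, using C11 atomics where the tool's code uses raw read/write barriers:

```c
#include <stdatomic.h>
#include <stdint.h>

struct app_sq {
	_Atomic uint32_t *khead, *ktail;	/* shared with the kernel */
	uint32_t *array;			/* indirection into sqes[] */
	uint32_t ring_mask;
	uint32_t sqe_head, sqe_tail;		/* application-private */
};

/* Hand all locally prepared SQEs to the kernel side of the ring;
 * the return value would be passed as to_submit to io_uring_enter(). */
static unsigned int submit_pending(struct app_sq *sq)
{
	uint32_t ktail = atomic_load_explicit(sq->ktail, memory_order_relaxed);
	unsigned int submitted = 0;

	while (sq->sqe_head != sq->sqe_tail) {
		sq->array[ktail & sq->ring_mask] = sq->sqe_head & sq->ring_mask;
		ktail++;
		sq->sqe_head++;
		submitted++;
	}
	/* Release: the kernel must see the array[] writes before the tail. */
	atomic_store_explicit(sq->ktail, ktail, memory_order_release);
	return submitted;
}
```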
|
setup.c
    22   sq->khead = ptr + p->sq_off.head;  in io_uring_mmap()
    23   sq->ktail = ptr + p->sq_off.tail;  in io_uring_mmap()
    24   sq->kring_mask = ptr + p->sq_off.ring_mask;  in io_uring_mmap()
    26   sq->kflags = ptr + p->sq_off.flags;  in io_uring_mmap()
    27   sq->kdropped = ptr + p->sq_off.dropped;  in io_uring_mmap()
    28   sq->array = ptr + p->sq_off.array;  in io_uring_mmap()
    34   if (sq->sqes == MAP_FAILED) {  in io_uring_mmap()
    37   munmap(sq->khead, sq->ring_sz);  in io_uring_mmap()
    100  struct io_uring_sq *sq = &ring->sq;  in io_uring_queue_exit() local
    103  munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));  in io_uring_queue_exit()
    [all …]
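Those `sq_off` assignments resolve kernel-provided byte offsets inside a shared ring mapping. A hedged sketch of the two mmap() calls behind them; the `IORING_OFF_*` constants and `struct io_uring_params` come from <linux/io_uring.h>, and error cleanup of the first mapping is elided:

```c
#include <stdint.h>
#include <sys/mman.h>
#include <linux/io_uring.h>

struct sq_map {
	void *ring;
	struct io_uring_sqe *sqes;
	uint32_t *khead, *ktail, *kring_mask;
};

/* Map the SQ ring (head/tail/mask/array live at sq_off offsets
 * inside it) and the separate SQE array, as io_uring_mmap() does. */
static int map_sq(int ring_fd, struct io_uring_params *p, struct sq_map *m)
{
	size_t ring_sz = p->sq_off.array + p->sq_entries * sizeof(uint32_t);

	m->ring = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	if (m->ring == MAP_FAILED)
		return -1;
	m->khead      = (uint32_t *)((char *)m->ring + p->sq_off.head);
	m->ktail      = (uint32_t *)((char *)m->ring + p->sq_off.tail);
	m->kring_mask = (uint32_t *)((char *)m->ring + p->sq_off.ring_mask);

	m->sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       ring_fd, IORING_OFF_SQES);
	return m->sqes == MAP_FAILED ? -1 : 0;
}
```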
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_tx.c
    181  mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,  in post_static_params()
    185  sq->pc += num_wqebbs;  in post_static_params()
    199  mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,  in post_progress_params()
    202  sq->pc += num_wqebbs;  in post_progress_params()
    319  pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);  in tx_post_resync_dump()
    339  dseg->lkey = sq->mkey_be;  in tx_post_resync_dump()
    357  stats = sq->stats;  in mlx5e_ktls_tx_handle_resync_dump_comp()
    370  tx_fill_wi(sq, pi, 1, 0, NULL);  in tx_post_fence_nop()
    372  mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);  in tx_post_fence_nop()
    377  struct mlx5e_txqsq *sq,  in mlx5e_ktls_tx_handle_ooo() argument
    [all …]
|
ktls_rx.c
    149  mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,  in post_static_params()
    159  sq->pc += num_wqebbs;  in post_static_params()
    179  mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,  in post_progress_params()
    190  sq->pc += num_wqebbs;  in post_progress_params()
    204  sq = &c->async_icosq;  in post_rx_param_wqes()
    214  mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);  in post_rx_param_wqes()
    312  sq->pc++;  in resync_post_get_progress_params()
    313  mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);  in resync_post_get_progress_params()
    346  sq = &c->async_icosq;  in resync_handle_work()
    371  sq = &c->async_icosq;  in resync_handle_seq_match()
    [all …]
|
/linux/sound/oss/dmasound/
dmasound_core.c
    485  sq->max_count = sq->numBufs ;  in sq_setup()
    486  sq->max_active = sq->numBufs ;  in sq_setup()
    487  sq->block_size = sq->bufSize;  in sq_setup()
    489  sq->user_frags = sq->numBufs ;  in sq_setup()
    490  sq->user_frag_size = sq->bufSize ;  in sq_setup()
    511  if ( sq->block_size <= 0 || sq->block_size > sq->bufSize) {  in sq_setup()
    515  sq->block_size = sq->bufSize ;  in sq_setup()
    518  sq->max_count = sq->user_frags ;  in sq_setup()
    520  sq->max_active = (sq->max_active <= sq->max_count) ?  in sq_setup()
    527  sq->max_active = sq->numBufs ;  in sq_setup()
    [all …]
|
/linux/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
    513   sq->desc = sq->dmem.base;  in nicvf_init_snd_queue()
    586   sq->tso_hdrs, sq->tso_hdrs_phys);  in nicvf_free_snd_queue()
    592   while (sq->head != sq->tail) {  in nicvf_free_snd_queue()
    593   skb = (struct sk_buff *)sq->skbuff[sq->head];  in nicvf_free_snd_queue()
    612   nicvf_unmap_sndq_buffers(nic, sq, sq->head,  in nicvf_free_snd_queue()
    619   sq->head &= (sq->dmem.q_len - 1);  in nicvf_free_snd_queue()
    865   sq = &qs->sq[qidx];  in nicvf_snd_queue_config()
    884   mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;  in nicvf_snd_queue_config()
    1128  sq->tail &= (sq->dmem.q_len - 1);  in nicvf_get_sq_desc()
    1149  sq->head &= (sq->dmem.q_len - 1);  in nicvf_put_sq_desc()
    [all …]
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
tx.c
    55   u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);  in mlx5e_xsk_tx_post_err()
    62   nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);  in mlx5e_xsk_tx_post_err()
    64   sq->doorbell_cseg = &nopwqe->ctrl;  in mlx5e_xsk_tx_post_err()
    69   struct xsk_buff_pool *pool = sq->xsk_pool;  in mlx5e_xsk_tx()
    81   sq);  in mlx5e_xsk_tx()
    108  if (sq->mpwqe.wqe)  in mlx5e_xsk_tx()
    109  mlx5e_xdp_mpwqe_complete(sq);  in mlx5e_xsk_tx()
    111  mlx5e_xsk_tx_post_err(sq, &xdpi);  in mlx5e_xsk_tx()
    118  if (sq->mpwqe.wqe)  in mlx5e_xsk_tx()
    119  mlx5e_xdp_mpwqe_complete(sq);  in mlx5e_xsk_tx()
    [all …]
|
tx.h
    14  bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
    16  static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)  in mlx5e_xsk_update_tx_wakeup() argument
    18  if (!xsk_uses_need_wakeup(sq->xsk_pool))  in mlx5e_xsk_update_tx_wakeup()
    21  if (sq->pc != sq->cc)  in mlx5e_xsk_update_tx_wakeup()
    22  xsk_clear_tx_need_wakeup(sq->xsk_pool);  in mlx5e_xsk_update_tx_wakeup()
    24  xsk_set_tx_need_wakeup(sq->xsk_pool);  in mlx5e_xsk_update_tx_wakeup()
|
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
    543   sq->head &= (sq->sqe_cnt - 1);  in otx2_sqe_flush()
    556   sq->sg[sq->head].num_segs = 0;  in otx2_sqe_add_sg()
    584   sq->sg[sq->head].size[seg] = len;  in otx2_sqe_add_sg()
    585   sq->sg[sq->head].num_segs++;  in otx2_sqe_add_sg()
    588   sq->sg[sq->head].skb = (u64)skb;  in otx2_sqe_add_sg()
    957   free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;  in otx2_sq_append_skb()
    1056  sq = &pfvf->qset.sq[cq->cint_idx];  in otx2_cleanup_tx_cqes()
    1119  sq->sg[sq->head].size[0] = len;  in otx2_xdp_sqe_add_sg()
    1120  sq->sg[sq->head].num_segs = 1;  in otx2_xdp_sqe_add_sg()
    1129  sq = &pfvf->qset.sq[qidx];  in otx2_xdp_sq_append_pkt()
    [all …]
|
/linux/drivers/net/ethernet/intel/ice/
ice_controlq.c
    77    if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)  in ice_check_sq_alive()
    78    return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |  in ice_check_sq_alive()
    106   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);  in ice_alloc_ctrlq_sq_ring()
    239   cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;  in ice_alloc_sq_bufs()
    260   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa;  in ice_alloc_sq_bufs()
    868   struct ice_ctl_q_ring *sq = &cq->sq;  in ice_clean_sq() local
    950   return rd32(hw, cq->sq.head) == cq->sq.next_to_use;  in ice_sq_done()
    1043  dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];  in ice_sq_send_cmd()
    1063  if (cq->sq.next_to_use == cq->sq.count)  in ice_sq_send_cmd()
    1065  wr32(hw, cq->sq.tail, cq->sq.next_to_use);  in ice_sq_send_cmd()
    [all …]
|
/linux/drivers/soc/qcom/
qmi_interface.c
    167  struct sockaddr_qrtr sq;  in qmi_send_new_lookup() local
    177  sq.sq_family = qmi->sq.sq_family;  in qmi_send_new_lookup()
    178  sq.sq_node = qmi->sq.sq_node;  in qmi_send_new_lookup()
    181  msg.msg_name = &sq;  in qmi_send_new_lookup()
    242  sq.sq_family = qmi->sq.sq_family;  in qmi_send_new_server()
    243  sq.sq_node = qmi->sq.sq_node;  in qmi_send_new_server()
    246  msg.msg_name = &sq;  in qmi_send_new_server()
    461  qmi->sq = sq;  in qmi_handle_net_reset()
    527  struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) };  in qmi_data_ready_work()
    557  if (sq.sq_node == qmi->sq.sq_node &&  in qmi_data_ready_work()
    [all …]
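Here `sq` is not a send queue but a `struct sockaddr_qrtr`, the Qualcomm IPC router address triple (family, node, port) used as `msg_name` on a datagram. A hedged userspace analogue of how the QMI code addresses a remote endpoint; the kernel code uses `kernel_sendmsg()` on an in-kernel socket, and the node/port values here are placeholders:

```c
#include <stdint.h>
#include <sys/socket.h>
#include <linux/qrtr.h>

/* Send a datagram to a QRTR node/port. Assumes sock was created
 * with socket(AF_QIPCRTR, SOCK_DGRAM, 0). */
static ssize_t qrtr_send(int sock, uint32_t node, uint32_t port,
			 const void *buf, size_t len)
{
	struct sockaddr_qrtr sq = {
		.sq_family = AF_QIPCRTR,
		.sq_node   = node,
		.sq_port   = port,
	};
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name    = &sq,
		.msg_namelen = sizeof(sq),
		.msg_iov     = &iov,
		.msg_iovlen  = 1,
	};

	return sendmsg(sock, &msg, 0);
}
```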
|
/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
    59   #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))  argument
    61   #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)  argument
    100  wq = sq->wq;  in hinic_sq_prepare_ctxt()
    226  if (!sq->saved_skb)  in alloc_sq_skb_arr()
    238  vfree(sq->saved_skb);  in free_sq_skb_arr()
    286  sq->hwif = hwif;  in hinic_init_sq()
    288  sq->wq = wq;  in hinic_init_sq()
    290  sq->irq = entry->vector;  in hinic_init_sq()
    307  free_sq_skb_arr(sq);  in hinic_clean_sq()
    620  struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);  in sq_prepare_db()
    [all …]
|
hinic_tx.c
    504  qp = container_of(txq->sq, struct hinic_qp, sq);  in hinic_lb_xmit_frame()
    565  qp = container_of(txq->sq, struct hinic_qp, sq);  in hinic_xmit_frame()
    669  struct hinic_sq *sq = txq->sq;  in free_all_tx_skbs() local
    701  struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);  in free_tx_poll()
    704  struct hinic_sq *sq = txq->sq;  in free_tx_poll() local
    746  hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {  in free_tx_poll()
    805  struct hinic_sq *sq = txq->sq;  in tx_request_irq() local
    809  qp = container_of(sq, struct hinic_qp, sq);  in tx_request_irq()
    844  struct hinic_sq *sq = txq->sq;  in tx_free_irq() local
    861  struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);  in hinic_init_txq()
    [all …]
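The `container_of(sq, struct hinic_qp, sq)` calls that recur above recover the enclosing queue pair from a pointer to its embedded send queue (declared as a `member` in hinic_hw_qp.h below). A userspace re-implementation that makes the pointer arithmetic explicit; the struct layouts are illustrative:

```c
#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): subtract the member's
 * offset from the member pointer to get the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hinic_sq { int qid; };
struct hinic_qp { int other_state; struct hinic_sq sq; };

int main(void)
{
	struct hinic_qp qp = { .sq = { .qid = 3 } };
	struct hinic_sq *sq = &qp.sq;
	struct hinic_qp *back = container_of(sq, struct hinic_qp, sq);

	printf("recovered qp == &qp? %d\n", back == &qp);
	return 0;
}
```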
|
hinic_hw_qp.h
    57   #define HINIC_MIN_TX_NUM_WQEBBS(sq) \  argument
    58   (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
    122  struct hinic_sq sq;  member
    133  struct hinic_sq *sq, u16 global_qid);
    142  void hinic_clean_sq(struct hinic_sq *sq);
    149  int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
    178  void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
    185  struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
    190  void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
    194  struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
    [all …]
|
/linux/net/qrtr/
ns.c
    52   struct sockaddr_qrtr sq;  member
    333  ret = say_hello(sq);  in ctrl_cmd_hello()
    346  struct sockaddr_qrtr sq;  in ctrl_cmd_bye() local
    399  sq.sq_node = srv->node;  in ctrl_cmd_bye()
    400  sq.sq_port = srv->port;  in ctrl_cmd_bye()
    427  struct sockaddr_qrtr sq;  in ctrl_cmd_del_client() local
    486  sq.sq_node = srv->node;  in ctrl_cmd_del_client()
    588  lookup->sq = *from;  in ctrl_cmd_new_lookup()
    700  sq.sq_node, sq.sq_port);  in qrtr_ns_worker()
    747  sq.sq_node, sq.sq_port);  in qrtr_ns_worker()
    [all …]
|
/linux/drivers/infiniband/hw/cxgb4/
qp.c
    135  sq->phys_addr = virt_to_phys(sq->queue);  in alloc_host_sq()
    136  dma_unmap_addr_set(sq, mapping, sq->dma_addr);  in alloc_host_sq()
    226  wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),  in create_qp()
    260  memset(wq->sq.queue, 0, wq->sq.memsize);  in create_qp()
    430  if (dstp == (u8 *)&sq->queue[sq->size])  in build_immd()
    535  (__be64 *)&sq->queue[sq->size],  in build_rdma_send()
    586  (__be64 *)&sq->queue[sq->size],  in build_rdma_write()
    647  build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],  in build_rdma_write_cmpl()
    651  build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],  in build_rdma_write_cmpl()
    728  if (++qhp->wq.sq.pidx == qhp->wq.sq.size)  in post_write_cmpl()
    [all …]
|
cq.c
    248  wq->sq.flush_cidx = wq->sq.cidx;  in c4iw_flush_sq()
    249  idx = wq->sq.flush_cidx;  in c4iw_flush_sq()
    262  if (wq->sq.flush_cidx >= wq->sq.size)  in c4iw_flush_sq()
    263  wq->sq.flush_cidx -= wq->sq.size;  in c4iw_flush_sq()
    273  wq->sq.flush_cidx = wq->sq.cidx;  in flush_completed_wrs()
    315  u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;  in advance_oldest_read()
    317  if (rptr == wq->sq.size)  in advance_oldest_read()
    320  wq->sq.oldest_read = &wq->sq.sw_sq[rptr];  in advance_oldest_read()
    711  wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;  in poll_cq()
    713  wq->sq.in_use -= idx - wq->sq.cidx;  in poll_cq()
    [all …]
|
/linux/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
    177   struct bnxt_qplib_q *sq = &qp->sq;  in bnxt_qplib_free_qp_hdr_buf() local
    199   struct bnxt_qplib_q *sq = &qp->sq;  in bnxt_qplib_alloc_qp_hdr_buf() local
    813   struct bnxt_qplib_q *sq = &qp->sq;  in bnxt_qplib_create_qp1() local
    901   sq->dbinfo.hwq = &sq->hwq;  in bnxt_qplib_create_qp1()
    938   sq = &qp->sq;  in bnxt_qplib_init_psn_ptr()
    954   struct bnxt_qplib_q *sq = &qp->sq;  in bnxt_qplib_create_qp() local
    1110  sq->dbinfo.hwq = &sq->hwq;  in bnxt_qplib_create_qp()
    1508  struct bnxt_qplib_q *sq = &qp->sq;  in bnxt_qplib_get_qp1_sq_buf() local
    2345  sq = &qp->sq;  in bnxt_qplib_cq_process_req()
    2363  swq = &sq->swq[sq->swq_last];  in bnxt_qplib_cq_process_req()
    [all …]
|
/linux/drivers/net/
virtio_net.c
    521   err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),  in __virtnet_xdp_xmit_one()
    1520  struct send_queue *sq = &vi->sq[index];  in virtnet_poll_cleantx() local
    1595  virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);  in virtnet_open()
    1698  struct send_queue *sq = &vi->sq[qnum];  in start_xmit() local
    1879  struct send_queue *sq = &vi->sq[i];  in virtnet_stats() local
    2295  struct send_queue *sq = &vi->sq[i];  in virtnet_get_ethtool_stats() local
    2685  struct send_queue *sq = &priv->sq[txqueue];  in virtnet_tx_timeout() local
    2693  txqueue, sq->name, sq->vq->index, sq->vq->name,  in virtnet_tx_timeout()
    2770  kfree(vi->sq);  in virtnet_free_queues()
    2958  if (!vi->sq)  in virtnet_alloc_queues()
    [all …]
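The `virtqueue_add_outbuf()` call at line 521 posts one outgoing buffer onto the virtqueue. A hedged kernel-style fragment of that post-and-kick sequence; it only compiles in-tree, and it skips the driver's xdp_to_ptr() pointer tagging and queue-full handling:

```c
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Queue one out-buffer, then kick the device if it wants a notify. */
static int post_tx_buf(struct virtqueue *vq, void *data,
		       void *buf, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);
	err = virtqueue_add_outbuf(vq, &sg, 1, data, GFP_ATOMIC);
	if (!err && virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
	return err;
}
```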
|