| /drivers/net/wwan/t7xx/ |
| t7xx_hif_dpmaif_tx.c |
|    54  struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];   in t7xx_dpmaif_update_drb_rd_idx() [local]
|    83  struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];   in t7xx_dpmaif_release_tx_buffer() [local]
|   177  queue_work(dpmaif_ctrl->txq[txq->index].worker,   in t7xx_dpmaif_tx_done()
|   178  &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);   in t7xx_dpmaif_tx_done()
|   253  txq = &dpmaif_ctrl->txq[skb_cb->txq_number];   in t7xx_dpmaif_add_skb_to_ring()
|   273  if (txq->drb_wr_idx >= txq->drb_size_cnt)   in t7xx_dpmaif_add_skb_to_ring()
|   274  txq->drb_wr_idx -= txq->drb_size_cnt;   in t7xx_dpmaif_add_skb_to_ring()
|   351  txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];   in t7xx_select_tx_queue()
|   556  txq->drb_base, txq->drb_bus_addr);   in t7xx_dpmaif_tx_drb_buf_init()
|   592  txq->drb_base, txq->drb_bus_addr);   in t7xx_dpmaif_tx_drb_buf_rel()
|   [all …]
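The pair of hits at source lines 273 and 274 is the classic compare-and-subtract wrap of a ring write index, which avoids a division when the ring size is not a power of two. A minimal userspace sketch of that idiom follows; the struct and helper names are invented here, and the precondition that one burst never exceeds a full ring length is an assumption, not something the listing shows.

    #include <assert.h>

    struct txq_model {
        unsigned int drb_wr_idx;   /* software write index into the ring */
        unsigned int drb_size_cnt; /* number of entries in the ring */
    };

    static void txq_advance_wr_idx(struct txq_model *txq, unsigned int n)
    {
        assert(n <= txq->drb_size_cnt); /* the single subtract relies on this */
        txq->drb_wr_idx += n;
        /* Index can overshoot by less than one ring length, so one
         * conditional subtract is enough to fold it back into range. */
        if (txq->drb_wr_idx >= txq->drb_size_cnt)
            txq->drb_wr_idx -= txq->drb_size_cnt;
    }

    int main(void)
    {
        struct txq_model txq = { .drb_wr_idx = 6, .drb_size_cnt = 8 };

        txq_advance_wr_idx(&txq, 5); /* 6 + 5 = 11, wraps to 3 */
        assert(txq.drb_wr_idx == 3);
        return 0;
    }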
|
| /drivers/infiniband/hw/hfi1/ |
| ipoib_tx.c |
|    57  netif_stop_subqueue(txq->priv->netdev, txq->q_idx);   in hfi1_ipoib_stop_txq()
|    64  netif_wake_subqueue(txq->priv->netdev, txq->q_idx);   in hfi1_ipoib_wake_txq()
|    82  if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&   in hfi1_ipoib_check_queue_depth()
|   353  struct hfi1_ipoib_txq *txq = txp->txq;   in hfi1_ipoib_send_dma_common() [local]
|   378  tx->txq = txq;   in hfi1_ipoib_send_dma_common()
|   450  iowait_starve_clear(txq->pkts_sent, &txq->wait);   in hfi1_ipoib_submit_tx()
|   460  struct hfi1_ipoib_txq *txq = txp->txq;   in hfi1_ipoib_send_dma_single() [local]
|   511  struct hfi1_ipoib_txq *txq = txp->txq;   in hfi1_ipoib_send_dma_list() [local]
|   739  txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);   in hfi1_ipoib_txreq_init()
|   854  txq->sde ? txq->sde->this_idx : 0);   in hfi1_ipoib_tx_timeout()
|   [all …]
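Lines 57, 64 and 82 sketch the usual stop/wake flow control on a transmit subqueue: stop when occupancy reaches a high-water mark, wake from the completion path once enough descriptors drain. Below is a userspace model of that hysteresis; the field names and the distinct low-water mark are assumptions for the sketch rather than details taken from hfi1.

    #include <stdbool.h>
    #include <stdio.h>

    struct txq_model {
        unsigned int used;  /* descriptors currently in flight */
        unsigned int hwat;  /* high-water mark: stop at or above this */
        unsigned int lwat;  /* low-water mark: wake at or below this */
        bool stopped;
    };

    /* Transmit path: called after posting a packet. */
    static void check_queue_depth(struct txq_model *q)
    {
        if (q->used >= q->hwat && !q->stopped) {
            q->stopped = true; /* the driver calls netif_stop_subqueue() here */
            printf("stopped at used=%u\n", q->used);
        }
    }

    /* Completion path: called after descriptors are reclaimed. */
    static void check_queue_wake(struct txq_model *q)
    {
        if (q->stopped && q->used <= q->lwat) {
            q->stopped = false; /* the driver calls netif_wake_subqueue() here */
            printf("woken at used=%u\n", q->used);
        }
    }

    int main(void)
    {
        struct txq_model q = { .hwat = 48, .lwat = 16, .stopped = false };

        for (q.used = 0; q.used < 64; q.used++)
            check_queue_depth(&q);   /* stops once used reaches 48 */
        for (unsigned int n = 64; n > 0; n--) {
            q.used = n - 1;
            check_queue_wake(&q);    /* wakes once used drops to 16 */
        }
        return 0;
    }

The gap between the two thresholds prevents the queue from flapping between stopped and running on every packet.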
|
| trace_tx.h |
|   914  __entry->txq = txq;
|   949  TP_ARGS(txq)
|   955  TP_ARGS(txq)
|   961  TP_ARGS(txq)
|   967  TP_ARGS(txq)
|   973  TP_ARGS(txq)
|   979  TP_ARGS(txq)
|   985  TP_ARGS(txq)
|   991  TP_ARGS(txq)
|  1015  __entry->txq = tx->txq;
|   [all …]
|
| /drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/ |
| tx.c |
|   402  &txq->entries[txq->read_ptr].meta;   in iwl_pcie_txq_unmap()
|   462  txq->tfds, txq->dma_addr);   in iwl_pcie_txq_free()
|   477  memset(txq, 0, sizeof(*txq));   in iwl_pcie_txq_free()
|   682  txq->read_ptr, txq->write_ptr);   in iwl_txq_log_scd_error()
|   695  txq->read_ptr, txq->write_ptr,   in iwl_txq_log_scd_error()
|  1095  (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {   in iwl_pcie_cmdq_reclaim()
|  1100  txq->write_ptr, txq->read_ptr);   in iwl_pcie_cmdq_reclaim()
|  1596  if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)   in iwl_pcie_enqueue_hcmd()
|  2280  if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {   in iwl_trans_pcie_tx()
|  2492  txq->read_ptr = txq->write_ptr;   in iwl_pcie_set_q_ptrs()
|   [all …]
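The checks at lines 1596 and 2280 test `txq->read_ptr == txq->write_ptr`, the empty condition for a free-running pointer pair, to decide whether the stuck-queue watchdog needs arming. A userspace model of arming the watchdog only while work is outstanding, with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    struct txq_model {
        unsigned int read_ptr;   /* next entry the hardware will complete */
        unsigned int write_ptr;  /* next entry the driver will fill */
        unsigned int wd_timeout; /* 0 disables the stuck-queue watchdog */
        bool wd_armed;
    };

    static void txq_enqueue(struct txq_model *q)
    {
        /* First frame on an idle queue starts the watchdog. */
        if (q->read_ptr == q->write_ptr && q->wd_timeout)
            q->wd_armed = true;
        q->write_ptr++;
    }

    static void txq_complete(struct txq_model *q)
    {
        q->read_ptr++;
        if (q->read_ptr == q->write_ptr)
            q->wd_armed = false; /* idle again: nothing left to get stuck */
    }

    int main(void)
    {
        struct txq_model q = { .wd_timeout = 2000 };

        txq_enqueue(&q);
        printf("armed=%d\n", q.wd_armed);  /* armed=1 */
        txq_complete(&q);
        printf("armed=%d\n", q.wd_armed);  /* armed=0 */
        return 0;
    }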
|
| tx-gen2.c |
|   742  if (iwl_txq_space(trans, txq) < txq->high_mark) {   in iwl_txq_gen2_tx()
|   760  idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);   in iwl_txq_gen2_tx()
|   797  if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)   in iwl_txq_gen2_tx()
|   823  while (txq->write_ptr != txq->read_ptr) {   in iwl_txq_gen2_unmap()
|   862  txq->tfds, txq->dma_addr);   in iwl_txq_gen2_free_memory()
|   865  txq->first_tb_bufs, txq->first_tb_dma);   in iwl_txq_gen2_free_memory()
|   871  txq->bc_tbl.addr, txq->bc_tbl.dma);   in iwl_txq_gen2_free_memory()
|   893  txq = trans_pcie->txqs.txq[txq_id];   in iwl_txq_gen2_free()
|   931  txq = kzalloc(sizeof(*txq), GFP_KERNEL);   in iwl_txq_dyn_alloc_dma()
|  1001  trans_pcie->txqs.txq[qid] = txq;   in iwl_pcie_txq_alloc_response()
|   [all …]
|
| /drivers/net/ethernet/huawei/hinic/ |
| hinic_tx.c |
|   533  hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);   in hinic_lb_xmit_frame()
|   617  hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);   in hinic_xmit_frame()
|   810  netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll,   in tx_request_irq()
|   832  err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);   in tx_request_irq()
|   866  txq->netdev = netdev;   in hinic_init_txq()
|   867  txq->sq = sq;   in hinic_init_txq()
|   869  txq_stats_init(txq);   in hinic_init_txq()
|   873  txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,   in hinic_init_txq()
|   875  if (!txq->sges)   in hinic_init_txq()
|   878  txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,   in hinic_init_txq()
|   [all …]
|
| /drivers/net/wireless/ath/ath9k/ |
| xmit.c |
|   134  struct ath_txq *txq = tid->txq;   in ath9k_wake_tx_queue() [local]
|   239  .txq = tid->txq,   in ath_tid_pull()
|   280  struct ath_txq *txq = tid->txq;   in ath_tx_flush_tid() [local]
|  1584  txq = txtid->txq;   in ath_tx_aggr_start()
|  1616  struct ath_txq *txq = txtid->txq;   in ath_tx_aggr_stop() [local]
|  1655  txq = tid->txq;   in ath_tx_aggr_wakeup()
|  1964  txq = &sc->tx.txq[i];   in ath_drain_all_txq()
|  2063  txq->axq_qnum, txq->axq_depth);   in ath_tx_txqaddbuf()
|  2075  txq->axq_qnum, txq->axq_link,   in ath_tx_txqaddbuf()
|  2749  txq = &sc->tx.txq[ts.qid];   in ath_tx_edma_tasklet()
|   [all …]
|
| /drivers/net/ethernet/huawei/hinic3/ |
| hinic3_tx.c |
|    25  struct hinic3_txq *txq;   in hinic3_alloc_txqs() [local]
|    37  txq = &nic_dev->txqs[q_id];   in hinic3_alloc_txqs()
|    38  txq->netdev = netdev;   in hinic3_alloc_txqs()
|    39  txq->q_id = q_id;   in hinic3_alloc_txqs()
|    42  txq->dev = &pdev->dev;   in hinic3_alloc_txqs()
|    64  struct hinic3_txq *txq,   in hinic3_tx_map_skb() [argument]
|   506  txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs);   in hinic3_send_one_skb()
|   510  txq->tx_start_thrs);   in hinic3_send_one_skb()
|   532  tx_info = &txq->tx_info[pi];   in hinic3_send_one_skb()
|   547  txq->tx_stop_thrs,   in hinic3_send_one_skb()
|   [all …]
|
| /drivers/net/ethernet/marvell/ |
| mv643xx_eth.c |
|   448  return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);   in txq_to_mp()
|   736  if (txq->tx_curr_desc == txq->tx_ring_size)   in txq_put_data_tso()
|   814  if (txq->tx_curr_desc == txq->tx_ring_size)   in txq_put_hdr_tso()
|   955  if (txq->tx_curr_desc == txq->tx_ring_size)   in txq_submit_skb()
|  1001  txq = mp->txq + queue;   in mv643xx_eth_xmit()
|  1262  struct tx_queue *txq = mp->txq + i;   in mv643xx_eth_get_stats() [local]
|  2033  struct tx_queue *txq = mp->txq + index;   in txq_init() [local]
|  2124  txq_reclaim(txq, txq->tx_ring_size, 1);   in txq_deinit()
|  2139  txq->tso_hdrs, txq->tso_hdrs_dma);   in txq_deinit()
|  2209  struct tx_queue *txq = mp->txq + i;   in handle_link_event() [local]
|   [all …]
|
| mvneta.c |
|   135  #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)   [argument]
|   777  if (txq->txq_get_index == txq->size)   in mvneta_txq_inc_get()
|   785  if (txq->txq_put_index == txq->size)   in mvneta_txq_inc_put()
|   996  txq->next_desc_to_proc = txq->last_desc - 1;   in mvneta_txq_desc_put()
|  1504  for (txq = 0; txq < txq_number; txq++)   in mvneta_defaults_set()
|  1928  if (txq->count <= txq->tx_wake_threshold)   in mvneta_txq_done()
|  2103  if (txq->count + num_frames >= txq->size)   in mvneta_xdp_submit_frame()
|  2985  if (txq->count >= txq->tx_stop_threshold)   in mvneta_tx()
|  3549  txq->last_desc = txq->size - 1;   in mvneta_txq_sw_init()
|  3551  txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);   in mvneta_txq_sw_init()
|   [all …]
|
| /drivers/net/ethernet/qlogic/qede/ |
| qede_fp.c |
|    90  idx, txq->sw_tx_cons, txq->sw_tx_prod);   in qede_free_tx_pkt()
|   295  writel(txq->tx_db.raw, txq->doorbell_addr);   in qede_update_tx_producer()
|   313  txq->stopped_cnt++;   in qede_xdp_xmit()
|   329  xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;   in qede_xdp_xmit()
|   334  txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;   in qede_xdp_xmit()
|   427  txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;   in qede_xdp_tx_int()
|   428  txq->xmit_pkts++;   in qede_xdp_tx_int()
|   457  txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;   in qede_tx_int()
|   458  txq->xmit_pkts++;   in qede_tx_int()
|  1709  txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;   in qede_start_xmit()
|   [all …]
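The steps at lines 334, 427, 457 and 1709 advance producer and consumer with `(idx + 1) % num_tx_buffers`, which works for any ring size at the cost of a division per step; power-of-two rings can mask instead. A standalone sketch with made-up names:

    #include <assert.h>

    struct ring_model {
        unsigned int sw_tx_prod;     /* producer index */
        unsigned int sw_tx_cons;     /* consumer index */
        unsigned int num_tx_buffers; /* ring size, any value > 0 */
    };

    static void ring_post(struct ring_model *r)
    {
        /* Modulo step: correct for non-power-of-two sizes. */
        r->sw_tx_prod = (r->sw_tx_prod + 1) % r->num_tx_buffers;
    }

    static void ring_complete(struct ring_model *r)
    {
        r->sw_tx_cons = (r->sw_tx_cons + 1) % r->num_tx_buffers;
    }

    int main(void)
    {
        struct ring_model r = { .num_tx_buffers = 5 };

        for (int i = 0; i < 7; i++)
            ring_post(&r);
        assert(r.sw_tx_prod == 2); /* 7 mod 5 */
        ring_complete(&r);
        assert(r.sw_tx_cons == 1);
        return 0;
    }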
|
| qede_main.c |
|   551  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,   in qede_txq_fp_log_metadata()
|   557  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,   in qede_txq_fp_log_metadata()
|   579  txq->index, le16_to_cpu(*txq->hw_cons_ptr),   in qede_tx_log_print()
|   608  txq = &fp->txq[cos];   in qede_tx_timeout()
|  1677  size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;   in qede_alloc_mem_txq()
|  1682  size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;   in qede_alloc_mem_txq()
|  1800  txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),   in qede_empty_tx_queue()
|  1887  struct qede_tx_queue *txq = &fp->txq[cos];   in qede_init_fp() [local]
|  2062  while (txq->sw_tx_cons != txq->sw_tx_prod) {   in qede_drain_txq()
|  2075  txq->index, txq->sw_tx_prod,   in qede_drain_txq()
|   [all …]
|
| /drivers/net/ethernet/atheros/alx/ |
| main.c |
|   159  if (txq->write_idx >= txq->read_idx)   in alx_tpd_avail()
|   160  return txq->count + txq->read_idx - txq->write_idx - 1;   in alx_tpd_avail()
|   200  alx_tpd_avail(txq) > txq->count / 4)   in alx_clean_tx_irq()
|   765  np->txq = txq;   in alx_alloc_napis()
|  1470  if (++txq->write_idx == txq->count)   in alx_map_tx_skb()
|  1473  tpd = &txq->tpd[txq->write_idx];   in alx_map_tx_skb()
|  1494  if (++txq->write_idx == txq->count)   in alx_map_tx_skb()
|  1496  tpd = &txq->tpd[txq->write_idx];   in alx_map_tx_skb()
|  1514  txq->bufs[txq->write_idx].skb = skb;   in alx_map_tx_skb()
|  1516  if (++txq->write_idx == txq->count)   in alx_map_tx_skb()
|   [all …]
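Lines 159 and 160 show one branch of `alx_tpd_avail()`: the free-descriptor count for a ring that keeps one slot unused, so that `write_idx == read_idx` can only mean empty, never full. In the sketch below the other branch is inferred from the formula's symmetry, not taken from the hits:

    #include <assert.h>

    struct tpd_ring {
        unsigned int write_idx; /* producer */
        unsigned int read_idx;  /* consumer */
        unsigned int count;     /* total descriptors */
    };

    /* Free descriptors, minus the one slot held in reserve. */
    static unsigned int tpd_avail(const struct tpd_ring *r)
    {
        if (r->write_idx >= r->read_idx)
            return r->count + r->read_idx - r->write_idx - 1;
        return r->read_idx - r->write_idx - 1; /* inferred branch */
    }

    int main(void)
    {
        struct tpd_ring r = { .count = 8 };

        assert(tpd_avail(&r) == 7);      /* empty ring: count - 1 free */
        r.write_idx = 5; r.read_idx = 2;
        assert(tpd_avail(&r) == 4);      /* 8 + 2 - 5 - 1 */
        r.write_idx = 1; r.read_idx = 5;
        assert(tpd_avail(&r) == 3);      /* 5 - 1 - 1 */
        return 0;
    }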
|
| /drivers/net/ethernet/mellanox/mlx5/core/en/ |
| selq.h |
|    30  static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels)   in mlx5e_txq_to_ch_ix() [argument]
|    32  while (unlikely(txq >= num_channels))   in mlx5e_txq_to_ch_ix()
|    33  txq -= num_channels;   in mlx5e_txq_to_ch_ix()
|    34  return txq;   in mlx5e_txq_to_ch_ix()
|    37  static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels)   in mlx5e_txq_to_ch_ix_htb() [argument]
|    39  if (unlikely(txq >= num_channels)) {   in mlx5e_txq_to_ch_ix_htb()
|    40  if (unlikely(txq >= num_channels << 3))   in mlx5e_txq_to_ch_ix_htb()
|    41  txq %= num_channels;   in mlx5e_txq_to_ch_ix_htb()
|    44  txq -= num_channels;   in mlx5e_txq_to_ch_ix_htb()
|    45  while (txq >= num_channels);   in mlx5e_txq_to_ch_ix_htb()
|   [all …]
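These hits reconstruct both helpers almost in full: `mlx5e_txq_to_ch_ix()` folds a queue index into a channel index by repeated subtraction, cheap when the index rarely exceeds the channel count, and the HTB variant falls back to a real modulo once the index is at least eight times out of range. A userspace rendering for experimentation; `unlikely()` is stubbed out and the missing lines 42-43 (presumably `else` plus `do`) are filled in by inference:

    #include <assert.h>
    #include <stdint.h>

    #define unlikely(x) (x) /* stand-in for the kernel's branch hint */

    static uint16_t txq_to_ch_ix(uint16_t txq, uint16_t num_channels)
    {
        /* Usually txq < num_channels, so the loop body rarely runs. */
        while (unlikely(txq >= num_channels))
            txq -= num_channels;
        return txq;
    }

    static uint16_t txq_to_ch_ix_htb(uint16_t txq, uint16_t num_channels)
    {
        if (unlikely(txq >= num_channels)) {
            if (unlikely(txq >= num_channels << 3))
                txq %= num_channels;   /* far out of range: divide once */
            else
                do                     /* within 8x: a few subtracts win */
                    txq -= num_channels;
                while (txq >= num_channels);
        }
        return txq;
    }

    int main(void)
    {
        assert(txq_to_ch_ix(5, 8) == 5);
        assert(txq_to_ch_ix(13, 8) == 5);
        assert(txq_to_ch_ix_htb(13, 8) == 5);
        assert(txq_to_ch_ix_htb(130, 8) == 2); /* 130 >= 64, so 130 % 8 */
        return 0;
    }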
|
| /drivers/net/ethernet/freescale/ |
| fec_main.c |
|   311  (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
|   340  (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;   in fec_enet_get_free_txdesc_num()
|   365  bdp = txq->bd.base;   in fec_dump()
|   590  bdp = txq->bd.cur;   in fec_enet_txq_submit_frag_skb()
|   628  bdp = txq->bd.cur;   in fec_enet_txq_submit_skb()
|   716  txq->bd.cur = bdp;   in fec_enet_txq_submit_skb()
|   912  txq->bd.cur = bdp;   in fec_enet_txq_submit_tso()
|  3375  txq->tso_hdrs, txq->tso_hdrs_dma);   in fec_enet_free_queue()
|  3392  txq = kzalloc(sizeof(*txq), GFP_KERNEL);   in fec_enet_alloc_queue()
|  3393  if (!txq) {   in fec_enet_alloc_queue()
|   [all …]
|
| /drivers/net/wireless/intel/iwlwifi/mld/ |
| tx.c |
|    72  u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;   in iwl_mld_allocate_txq()
|   125  struct ieee80211_txq *txq;   in iwl_mld_add_txq_list() [local]
|   145  iwl_mld_tx_from_txq(mld, txq);   in iwl_mld_add_txq_list()
|   197  txq->tid;   in iwl_mld_remove_txq()
|   608  if (txq && txq->sta)   in iwl_mld_get_tx_queue_id()
|   730  struct ieee80211_sta *sta = txq ? txq->sta : NULL;   in iwl_mld_tx_mpdu()
|   873  struct ieee80211_txq *txq)   in iwl_mld_tx_tso() [argument]
|   881  if (WARN_ON(!txq || !txq->sta))   in iwl_mld_tx_tso()
|   931  struct ieee80211_txq *txq)   in iwl_mld_tx_tso() [argument]
|   992  txq->sta ? txq->sta->addr : zero_addr, txq->tid);   in iwl_mld_tx_from_txq()
|   [all …]
|
| /drivers/net/ethernet/chelsio/cxgb4vf/ |
| sge.c |
|  1135  netif_tx_stop_queue(txq->txq);   in txq_stop()
|  1263  wr = (void *)&txq->q.desc[txq->q.pidx];   in t4vf_eth_xmit()
|  1319  txq->tso++;   in t4vf_eth_xmit()
|  2133  if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {   in sge_tx_timer_cb()
|  2141  __netif_tx_unlock(txq->txq);   in sge_tx_timer_cb()
|  2430  txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,   in t4vf_sge_alloc_eth_txq()
|  2433  &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);   in t4vf_sge_alloc_eth_txq()
|  2482  txq->q.desc, txq->q.phys_addr);   in t4vf_sge_alloc_eth_txq()
|  2490  txq->q.stat = (void *)&txq->q.desc[txq->q.size];   in t4vf_sge_alloc_eth_txq()
|  2498  txq->txq = devq;   in t4vf_sge_alloc_eth_txq()
|   [all …]
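Lines 2133 and 2141 show the deferred-reclaim idiom in the SGE timer callback: clean completed descriptors only if the TX queue lock can be taken without blocking, and otherwise leave the work for the next tick. A pthread analogue of that trylock-or-skip pattern, with invented names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;
    static int reclaimable = 3; /* completed descriptors awaiting cleanup */

    /* Periodic timer callback: reclaim only if the lock is free; never block. */
    static void tx_timer_cb(void)
    {
        if (reclaimable > 0 && pthread_mutex_trylock(&txq_lock) == 0) {
            printf("reclaimed %d descriptors\n", reclaimable);
            reclaimable = 0;
            pthread_mutex_unlock(&txq_lock);
        }
        /* On contention, do nothing: the transmit path holding the lock
         * (or the next tick) will pick the work up instead. */
    }

    int main(void)
    {
        tx_timer_cb();
        return 0;
    }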
|
| /drivers/net/wireless/mediatek/mt76/ |
| tx.c |
|    11  if (!txq->sta)   in mt76_txq_get_qid()
|    14  return txq->ac;   in mt76_txq_get_qid()
|    21  struct ieee80211_txq *txq;   in mt76_tx_check_agg_ssn() [local]
|    30  txq = sta->txq[tid];   in mt76_tx_check_agg_ssn()
|   417  struct ieee80211_txq *txq = sta->txq[i];   in mt76_release_buffered_frames() [local]
|   480  ieee80211_get_tx_rates(txq->vif, txq->sta, skb,   in mt76_txq_send_burst()
|   502  ieee80211_get_tx_rates(txq->vif, txq->sta, skb,   in mt76_txq_send_burst()
|   535  if (!txq)   in mt76_txq_schedule_list()
|   558  u8 tid = txq->tid;   in mt76_txq_schedule_list()
|   737  struct ieee80211_txq *txq = sta->txq[i];   in mt76_stop_tx_queues() [local]
|   [all …]
|
| /drivers/net/ethernet/hisilicon/ |
| hisi_femac.c |
|   155  struct hisi_femac_queue *txq = &priv->txq;   in hisi_femac_xmit_reclaim() [local]
|   163  skb = txq->skb[txq->tail];   in hisi_femac_xmit_reclaim()
|   177  txq->skb[txq->tail] = NULL;   in hisi_femac_xmit_reclaim()
|   178  txq->tail = (txq->tail + 1) % txq->num;   in hisi_femac_xmit_reclaim()
|   387  struct hisi_femac_queue *txq = &priv->txq;   in hisi_femac_free_skb_rings() [local]
|   412  pos = txq->tail;   in hisi_femac_free_skb_rings()
|   503  struct hisi_femac_queue *txq = &priv->txq;   in hisi_femac_net_xmit() [local]
|   517  if (unlikely(!CIRC_SPACE(txq->head, txq->tail,   in hisi_femac_net_xmit()
|   533  txq->dma_phys[txq->head] = addr;   in hisi_femac_net_xmit()
|   535  txq->skb[txq->head] = skb;   in hisi_femac_net_xmit()
|   [all …]
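Line 517 gates transmission on `CIRC_SPACE()`, the kernel's circular-buffer helper for power-of-two rings. The two macros below are reproduced as defined in include/linux/circ_buf.h, to the best of my knowledge; the surrounding demo is new:

    #include <assert.h>

    /* Items queued; requires size to be a power of two. */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    /* Free slots, leaving one empty so head == tail always means "empty". */
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    int main(void)
    {
        unsigned int head = 0, tail = 0, size = 16;

        assert(CIRC_SPACE(head, tail, size) == 15); /* empty: size - 1 free */
        head = 5;
        assert(CIRC_CNT(head, tail, size) == 5);
        assert(CIRC_SPACE(head, tail, size) == 10);
        tail = 5;
        assert(CIRC_SPACE(head, tail, size) == 15); /* drained again */
        return 0;
    }

The mask makes the arithmetic correct even when the unsigned indices are allowed to run free and wrap, which is why the size must be a power of two.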
|
| /drivers/net/ethernet/intel/idpf/ |
| idpf_txrx.h |
|   117  #define IDPF_DESC_UNUSED(txq) \   [argument]
|   118  ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
|   119  (txq)->next_to_clean - (txq)->next_to_use - 1)
|   121  #define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)   [argument]
|   122  #define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \   [argument]
|   123  (txq)->desc_count >> 2)
|   129  #define IDPF_TX_COMPLQ_PENDING(txq) \   [argument]
|   130  (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
|   132  (txq)->num_completions_pending - (txq)->complq->num_completions)
|   137  ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
|   [all …]
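`IDPF_DESC_UNUSED()` (lines 117-119) is the same keep-one-slot-free count seen elsewhere in this listing, expressed as a ternary over `next_to_clean` and `next_to_use`. A standalone check of the arithmetic, with the macro rewritten as a function for readability:

    #include <assert.h>

    struct txq_model {
        unsigned int next_to_clean; /* consumer index */
        unsigned int next_to_use;   /* producer index */
        unsigned int desc_count;    /* ring size */
    };

    /* Same expression as the macro: free slots, one kept in reserve. */
    static unsigned int desc_unused(const struct txq_model *q)
    {
        return ((q->next_to_clean > q->next_to_use) ? 0 : q->desc_count) +
               q->next_to_clean - q->next_to_use - 1;
    }

    int main(void)
    {
        struct txq_model q = { .desc_count = 32 };

        assert(desc_unused(&q) == 31);            /* empty ring: 32 - 1 free */
        q.next_to_use = 10; q.next_to_clean = 4;  /* producer ahead, no wrap */
        assert(desc_unused(&q) == 25);            /* 32 + 4 - 10 - 1 */
        q.next_to_use = 2; q.next_to_clean = 20;  /* producer has wrapped */
        assert(desc_unused(&q) == 17);            /* 0 + 20 - 2 - 1 */
        return 0;
    }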
|
| idpf_txrx.c |
|    83  .dev = txq->dev,   in idpf_tx_buf_rel_all()
|    90  if (!txq->tx_buf)   in idpf_tx_buf_rel_all()
|    97  kfree(txq->tx_buf);   in idpf_tx_buf_rel_all()
|    98  txq->tx_buf = NULL;   in idpf_tx_buf_rel_all()
|   137  netdev_tx_reset_subqueue(txq->netdev, txq->idx);   in idpf_tx_desc_rel()
|   139  if (!txq->desc_ring)   in idpf_tx_desc_rel()
|   142  dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);   in idpf_tx_desc_rel()
|   144  txq->next_to_use = 0;   in idpf_tx_desc_rel()
|  1716  .dev = txq->dev,   in idpf_tx_clean_stashed_bufs()
|  2361  txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);   in idpf_tx_splitq_bump_ntu()
|   [all …]
|
| /drivers/net/wireless/ath/ath5k/ |
| base.c |
|   877  txq->txq_len++;   in ath5k_txbuf_setup()
|  1056  if (!txq->setup) {   in ath5k_txq_setup()
|  1057  txq->qnum = qnum;   in ath5k_txq_setup()
|  1058  txq->link = NULL;   in ath5k_txq_setup()
|  1062  txq->txq_len = 0;   in ath5k_txq_setup()
|  1173  txq->txq_len--;   in ath5k_drain_tx_buffs()
|  1190  if (txq->setup) {   in ath5k_txq_release()
|  1676  if (txq->txq_len >= txq->txq_max &&   in ath5k_tx_queue()
|  1822  txq->txq_len--;   in ath5k_tx_processq()
|  1827  if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)   in ath5k_tx_processq()
|   [all …]
|
| /drivers/net/ethernet/chelsio/cxgb3/ |
| sge.c |
|   691  if (q->txq[i].desc) {   in t3_free_qset()
|   701  q->txq[i].size *   in t3_free_qset()
|   703  q->txq[i].desc, q->txq[i].phys_addr);   in t3_free_qset()
|  1282  q = &qs->txq[TXQ_ETH];   in t3_eth_xmit()
|  2256  struct sge_txq *txq = &qs->txq[TXQ_ETH];   in check_ring_db() [local]
|  2258  if (txq->cleaned + txq->in_use != txq->processed &&   in check_ring_db()
|  2267  struct sge_txq *txq = &qs->txq[TXQ_OFLD];   in check_ring_db() [local]
|  2269  if (txq->cleaned + txq->in_use != txq->processed &&   in check_ring_db()
|  3064  if (!q->txq[i].desc)   in t3_sge_alloc_qset()
|  3067  q->txq[i].gen = 1;   in t3_sge_alloc_qset()
|   [all …]
|
| /drivers/net/ethernet/fungible/funeth/ |
| funeth_trace.h |
|    15  TP_PROTO(const struct funeth_txq *txq,
|    20  TP_ARGS(txq, len, sqe_idx, ngle),
|    27  __string(devname, txq->netdev->name)
|    31  __entry->qidx = txq->qidx;
|    45  TP_PROTO(const struct funeth_txq *txq,
|    50  TP_ARGS(txq, sqe_idx, num_sqes, hw_head),
|    57  __string(devname, txq->netdev->name)
|    61  __entry->qidx = txq->qidx;
|
| /drivers/net/wireless/intel/iwlegacy/ |
| common.c |
|  2752  struct il_tx_queue *txq = &il->txq[txq_id];   in il_tx_queue_unmap() [local]
|  2776  struct il_tx_queue *txq = &il->txq[txq_id];   in il_tx_queue_free() [local]
|  2791  txq->tfds, txq->q.dma_addr);   in il_tx_queue_free()
|  2804  memset(txq, 0, sizeof(*txq));   in il_tx_queue_free()
|  2871  txq->tfds, txq->q.dma_addr);   in il_cmd_queue_free()
|  2880  memset(txq, 0, sizeof(*txq));   in il_cmd_queue_free()
|  3008  struct il_tx_queue *txq = &il->txq[txq_id];   in il_tx_queue_init() [local]
|  3031  if (!txq->meta || !txq->cmd)   in il_tx_queue_init()
|  3084  struct il_tx_queue *txq = &il->txq[txq_id];   in il_tx_queue_reset() [local]
|  3251  struct il_tx_queue *txq = &il->txq[txq_id];   in il_hcmd_queue_reclaim() [local]
|   [all …]
|