Lines matching references to txq

2607 static void netif_setup_tc(struct net_device *dev, unsigned int txq)  in netif_setup_tc()  argument
2613 if (tc->offset + tc->count > txq) { in netif_setup_tc()
2624 if (tc->offset + tc->count > txq) { in netif_setup_tc()
2632 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) in netdev_txq_to_tc() argument
2640 if ((txq - tc->offset) < tc->count) in netdev_txq_to_tc()
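The matches in netif_setup_tc() and netdev_txq_to_tc() above are the core's sanity check ("offset + count > txq") and the reverse lookup from a queue index to its traffic class. A minimal sketch of the driver side, assuming a hypothetical device with at least 8 TX queues split over two classes (the my_* names are illustrative, not from the listing):

        #include <linux/netdevice.h>

        static int my_setup_two_tcs(struct net_device *dev)
        {
                int err;

                err = netdev_set_num_tc(dev, 2);
                if (err)
                        return err;

                /* TC0 -> queues 0..3, TC1 -> queues 4..7; these are the
                 * offset/count ranges netif_setup_tc() later validates
                 * against the real queue count.
                 */
                err = netdev_set_tc_queue(dev, 0, 4, 0);
                if (err)
                        return err;
                return netdev_set_tc_queue(dev, 1, 4, 4);
        }

        static int my_txq_class(struct net_device *dev, unsigned int txq)
        {
                /* Walks dev->tc_to_txq[] and returns the TC whose
                 * [offset, offset + count) range contains txq.
                 */
                return netdev_txq_to_tc(dev, txq);
        }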
3049 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; in netdev_unbind_all_sb_channels() local
3052 while (txq-- != &dev->_tx[0]) { in netdev_unbind_all_sb_channels()
3053 if (txq->sb_dev) in netdev_unbind_all_sb_channels()
3054 netdev_unbind_sb_channel(dev, txq->sb_dev); in netdev_unbind_all_sb_channels()
3104 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; in netdev_unbind_sb_channel() local
3112 while (txq-- != &dev->_tx[0]) { in netdev_unbind_sb_channel()
3113 if (txq->sb_dev == sb_dev) in netdev_unbind_sb_channel()
3114 txq->sb_dev = NULL; in netdev_unbind_sb_channel()
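The two unbind helpers above clear txq->sb_dev for queues that were bound to a subordinate device. A hedged sketch of the bind/unbind pairing a driver offloading an upper device (e.g. a macvlan) might use, assuming the my_offload_* names are purely illustrative:

        #include <linux/netdevice.h>

        static int my_offload_bind(struct net_device *lower,
                                   struct net_device *upper,
                                   u16 offset, u16 count)
        {
                int err;

                /* Tag the upper device with a non-zero subordinate channel. */
                err = netdev_set_sb_channel(upper, 1);
                if (err)
                        return err;

                /* Point TC 0 of the upper device at [offset, offset + count)
                 * of the lower device's TX queues; this sets txq->sb_dev on
                 * each queue in that range.
                 */
                err = netdev_bind_sb_channel_queue(lower, upper, 0, count, offset);
                if (err)
                        netdev_set_sb_channel(upper, 0);
                return err;
        }

        static void my_offload_unbind(struct net_device *lower,
                                      struct net_device *upper)
        {
                /* Clears txq->sb_dev for every queue bound to @upper, as in
                 * netdev_unbind_sb_channel() above.
                 */
                netdev_unbind_sb_channel(lower, upper);
                netdev_set_sb_channel(upper, 0);
        }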
3169 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) in netif_set_real_num_tx_queues() argument
3174 disabling = txq < dev->real_num_tx_queues; in netif_set_real_num_tx_queues()
3176 if (txq < 1 || txq > dev->num_tx_queues) in netif_set_real_num_tx_queues()
3184 txq); in netif_set_real_num_tx_queues()
3189 netif_setup_tc(dev, txq); in netif_set_real_num_tx_queues()
3191 net_shaper_set_real_num_tx_queues(dev, txq); in netif_set_real_num_tx_queues()
3193 dev_qdisc_change_real_num_tx(dev, txq); in netif_set_real_num_tx_queues()
3195 dev->real_num_tx_queues = txq; in netif_set_real_num_tx_queues()
3199 qdisc_reset_all_tx_gt(dev, txq); in netif_set_real_num_tx_queues()
3201 netif_reset_xps_queues_gt(dev, txq); in netif_set_real_num_tx_queues()
3205 dev->real_num_tx_queues = txq; in netif_set_real_num_tx_queues()
3253 unsigned int txq, unsigned int rxq) in netif_set_real_num_queues() argument
3258 if (txq < 1 || txq > dev->num_tx_queues || in netif_set_real_num_queues()
3270 if (txq > dev->real_num_tx_queues) { in netif_set_real_num_queues()
3271 err = netif_set_real_num_tx_queues(dev, txq); in netif_set_real_num_queues()
3277 if (txq < dev->real_num_tx_queues) in netif_set_real_num_queues()
3278 WARN_ON(netif_set_real_num_tx_queues(dev, txq)); in netif_set_real_num_queues()
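netif_set_real_num_tx_queues() above handles the qdisc reset and XPS cleanup when the active queue count shrinks, and netif_set_real_num_queues() adjusts TX and RX together with rollback if the second change fails. A minimal sketch of a caller, assuming a hypothetical ethtool .set_channels handler using combined channels only:

        #include <linux/netdevice.h>
        #include <linux/ethtool.h>

        static int my_set_channels(struct net_device *dev,
                                   struct ethtool_channels *ch)
        {
                unsigned int count = ch->combined_count;

                if (!count || count > dev->num_tx_queues)
                        return -EINVAL;

                /* One call updates both real_num_tx_queues and
                 * real_num_rx_queues, undoing the TX change if the
                 * RX change fails.
                 */
                return netif_set_real_num_queues(dev, count, count);
        }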
3390 void netif_schedule_queue(struct netdev_queue *txq) in netif_schedule_queue() argument
3393 if (!netif_xmit_stopped(txq)) { in netif_schedule_queue()
3394 struct Qdisc *q = rcu_dereference(txq->qdisc); in netif_schedule_queue()
3817 struct netdev_queue *txq, bool more) in xmit_one() argument
3827 rc = netdev_start_xmit(skb, dev, txq, more); in xmit_one()
3834 struct netdev_queue *txq, int *ret) in dev_hard_start_xmit() argument
3843 rc = xmit_one(skb, dev, txq, next != NULL); in dev_hard_start_xmit()
3850 if (netif_tx_queue_stopped(txq) && skb) { in dev_hard_start_xmit()
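xmit_one() reaches the driver through netdev_start_xmit(), and dev_hard_start_xmit() then checks netif_tx_queue_stopped() before pushing the next segment. A hedged sketch of the driver side that check observes, with my_ring_full()/my_post_descriptor() as stand-ins for real ring management:

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        /* Placeholder helpers standing in for real hardware ring code. */
        static bool my_ring_full(struct net_device *dev) { return false; }
        static void my_post_descriptor(struct net_device *dev, struct sk_buff *skb)
        {
                dev_kfree_skb_any(skb); /* stand-in for queueing to hardware */
        }

        static netdev_tx_t my_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
        {
                struct netdev_queue *txq;

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                my_post_descriptor(dev, skb);

                /* Stop the queue once the ring is full; dev_hard_start_xmit()
                 * sees netif_tx_queue_stopped() and requeues the remaining
                 * segments instead of hammering a full ring.
                 */
                if (my_ring_full(dev))
                        netif_tx_stop_queue(txq);

                return NETDEV_TX_OK;
        }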
4064 struct netdev_queue *txq) in dev_qdisc_enqueue() argument
4070 trace_qdisc_enqueue(q, txq, skb); in dev_qdisc_enqueue()
4076 struct netdev_queue *txq) in __dev_xmit_skb() argument
4094 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); in __dev_xmit_skb()
4102 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) && in __dev_xmit_skb()
4110 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); in __dev_xmit_skb()
4152 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { in __dev_xmit_skb()
4164 rc = dev_qdisc_enqueue(skb, q, &to_free, txq); in __dev_xmit_skb()
4623 struct netdev_queue *txq = NULL; in __dev_queue_xmit() local
4661 txq = netdev_tx_queue_mapping(dev, skb); in __dev_queue_xmit()
4672 if (!txq) in __dev_queue_xmit()
4673 txq = netdev_core_pick_tx(dev, skb, sb_dev); in __dev_queue_xmit()
4675 q = rcu_dereference_bh(txq->qdisc); in __dev_queue_xmit()
4679 rc = __dev_xmit_skb(skb, q, dev, txq); in __dev_queue_xmit()
4701 if (READ_ONCE(txq->xmit_lock_owner) != cpu) { in __dev_queue_xmit()
4709 HARD_TX_LOCK(dev, txq, cpu); in __dev_queue_xmit()
4711 if (!netif_xmit_stopped(txq)) { in __dev_queue_xmit()
4713 skb = dev_hard_start_xmit(skb, dev, txq, &rc); in __dev_queue_xmit()
4716 HARD_TX_UNLOCK(dev, txq); in __dev_queue_xmit()
4720 HARD_TX_UNLOCK(dev, txq); in __dev_queue_xmit()
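The __dev_queue_xmit() matches above show the full path: pick a txq, take the qdisc via __dev_xmit_skb(), or for lockless devices grab HARD_TX_LOCK and transmit directly. The usual entry point from the rest of the stack is dev_queue_xmit(); a minimal sketch of a caller (the function name is illustrative):

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        static int my_send(struct net_device *dev, struct sk_buff *skb)
        {
                skb->dev = dev;
                /* netdev_core_pick_tx() chooses the txq (XPS or
                 * ndo_select_queue) unless a mapping was already set;
                 * the skb is consumed regardless of the return value.
                 */
                return dev_queue_xmit(skb);
        }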
4749 struct netdev_queue *txq; in __dev_direct_xmit() local
4762 txq = skb_get_tx_queue(dev, skb); in __dev_direct_xmit()
4767 HARD_TX_LOCK(dev, txq, smp_processor_id()); in __dev_direct_xmit()
4768 if (!netif_xmit_frozen_or_drv_stopped(txq)) in __dev_direct_xmit()
4769 ret = netdev_start_xmit(skb, dev, txq, false); in __dev_direct_xmit()
4770 HARD_TX_UNLOCK(dev, txq); in __dev_direct_xmit()
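__dev_direct_xmit() above bypasses the qdisc entirely and pushes an skb at one specific TX queue under HARD_TX_LOCK; callers normally use the dev_direct_xmit() wrapper. A hedged sketch, with my_inject() as an illustrative caller:

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        static int my_inject(struct net_device *dev, struct sk_buff *skb,
                             u16 queue_id)
        {
                skb->dev = dev;
                /* dev_direct_xmit() frees the skb itself if the
                 * transmit does not complete.
                 */
                return dev_direct_xmit(skb, queue_id);
        }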
5406 struct netdev_queue *txq; in generic_xdp_tx() local
5410 txq = netdev_core_pick_tx(dev, skb, NULL); in generic_xdp_tx()
5412 HARD_TX_LOCK(dev, txq, cpu); in generic_xdp_tx()
5413 if (!netif_xmit_frozen_or_drv_stopped(txq)) { in generic_xdp_tx()
5414 rc = netdev_start_xmit(skb, dev, txq, 0); in generic_xdp_tx()
5418 HARD_TX_UNLOCK(dev, txq); in generic_xdp_tx()
7063 struct netdev_queue *txq; in netif_queue_set_napi() local
7075 txq = netdev_get_tx_queue(dev, queue_index); in netif_queue_set_napi()
7076 txq->napi = napi; in netif_queue_set_napi()
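netif_queue_set_napi() above records which NAPI instance services a given queue (txq->napi), which is what the netdev netlink queue API reports to user space. A hedged sketch of the driver side, assuming the my_* naming and a call site that already holds the required locking (RTNL, or the per-device instance lock on newer kernels):

        #include <linux/netdevice.h>

        static void my_link_txq_napi(struct net_device *dev, unsigned int idx,
                                     struct napi_struct *napi)
        {
                /* Typically called from ndo_open or a queue restart path. */
                netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_TX, napi);
        }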
11008 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_stop_all_queues() local
11010 netif_tx_stop_queue(txq); in netif_tx_stop_all_queues()
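netif_tx_stop_all_queues() above is usually paired with netif_tx_wake_all_queues() around a device reset. A minimal sketch of that pairing, with my_reset_hw() as a placeholder:

        #include <linux/netdevice.h>

        static void my_reset_hw(struct net_device *dev) { /* placeholder */ }

        static void my_reset(struct net_device *dev)
        {
                /* Sets the driver-XOFF bit on every txq so the stack stops
                 * calling ndo_start_xmit while the rings are torn down.
                 */
                netif_tx_stop_all_queues(dev);

                my_reset_hw(dev);

                /* Re-enable transmission and reschedule any stopped qdiscs. */
                netif_tx_wake_all_queues(dev);
        }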