| /net/sched/ |
| sch_generic.c |
| 88 | txq = skb_get_tx_queue(txq->dev, skb); | in __skb_dequeue_bad_txq() |
| 100 | qdisc_maybe_clear_missed(q, txq); | in __skb_dequeue_bad_txq() |
| 261 | txq = skb_get_tx_queue(txq->dev, skb); | in dequeue_skb() |
| 348 | HARD_TX_UNLOCK(dev, txq); | in sch_direct_xmit() |
| 393 | struct netdev_queue *txq; | in qdisc_restart() (local) |
| 461 | __netif_tx_lock(txq, cpu); | in netif_freeze_queues() |
| 463 | __netif_tx_unlock(txq); | in netif_freeze_queues() |
| 486 | netif_schedule_queue(txq); | in netif_unfreeze_queues() |
| 513 | struct netdev_queue *txq; | in dev_watchdog() (local) |
| 516 | if (!netif_xmit_stopped(txq)) | in dev_watchdog() |
| [all …] |
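The netif_freeze_queues()/netif_unfreeze_queues() matches show the per-queue locking walk used to quiesce a device's TX queues. A minimal sketch of that idiom, assuming a hypothetical helper name; the real function also documents why the lock must be taken even though only one thread freezes:

```c
/* Minimal sketch of the freeze walk suggested by the matches above:
 * take each queue's xmit lock with the current CPU as owner, mark the
 * queue frozen, and release.  Hypothetical helper name, simplified
 * from netif_freeze_queues().
 */
#include <linux/netdevice.h>

static void freeze_all_txqs(struct net_device *dev)
{
	int cpu = smp_processor_id();
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* Serializes against drivers already inside
		 * ->ndo_start_xmit() that checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}
```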
|
| sch_teql.c |
| 148 | struct netdev_queue *txq; | in teql_destroy() (local) |
| 151 | txq = netdev_get_tx_queue(master->dev, 0); | in teql_destroy() |
| 154 | root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); | in teql_destroy() |
| 156 | qdisc_reset(rtnl_dereference(txq->qdisc)); | in teql_destroy() |
| 219 | struct net_device *dev, struct netdev_queue *txq, | in __teql_resolve() (argument) |
| 259 | struct netdev_queue *txq) | in teql_resolve() (argument) |
| 264 | if (rcu_access_pointer(txq->qdisc) == &noop_qdisc) | in teql_resolve() |
| 271 | res = __teql_resolve(skb, skb_res, dev, txq, dst); | in teql_resolve() |
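txq->qdisc is an RCU-managed pointer, and the matches show the two access styles: rtnl_dereference() when RTNL is held (teql_destroy()) and rcu_access_pointer() for a pure pointer comparison (teql_resolve()). A short sketch with hypothetical wrapper names:

```c
/* txq->qdisc is RCU-managed.  Under RTNL it is read with
 * rtnl_dereference(); for a comparison that never dereferences,
 * rcu_access_pointer() suffices.  Hypothetical wrapper names.
 */
#include <net/sch_generic.h>

static void reset_first_txq_qdisc(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	ASSERT_RTNL();
	qdisc_reset(rtnl_dereference(txq->qdisc));
}

static bool txq_has_noop_qdisc(struct netdev_queue *txq)
{
	/* No dereference happens, so no RCU read lock is required. */
	return rcu_access_pointer(txq->qdisc) == &noop_qdisc;
}
```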
|
| sch_taprio.c |
| 705 | static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq, | in taprio_dequeue_from_txq() (argument) |
| 711 | struct Qdisc *child = q->qdiscs[txq]; | in taprio_dequeue_from_txq() |
| 762 | static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq) | in taprio_next_tc_txq() (argument) |
| 767 | (*txq)++; | in taprio_next_tc_txq() |
| 768 | if (*txq == offset + count) | in taprio_next_tc_txq() |
| 769 | *txq = offset; | in taprio_next_tc_txq() |
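The wrap-around in taprio_next_tc_txq() advances a TX queue index round-robin inside the contiguous queue range owned by one traffic class. A reconstruction from the matches above; the offset/count lookup through dev->tc_to_txq[] is an assumption based on how mqprio-style qdiscs map classes to queues:

```c
/* Reconstruction of taprio_next_tc_txq() from the matches above:
 * advance *txq round-robin within traffic class tc's queue range.
 * The dev->tc_to_txq[] lookup is assumed, not shown in the listing.
 */
static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
{
	int offset = dev->tc_to_txq[tc].offset;
	int count = dev->tc_to_txq[tc].count;

	(*txq)++;
	if (*txq == offset + count)
		*txq = offset;
}
```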
|
| /net/core/ |
| netpoll.c |
| 63 | struct netdev_queue *txq) | in netpoll_start_xmit() (argument) |
| 97 | struct netdev_queue *txq; | in queue_process() (local) |
| 116 | skb_queue_head(&npinfo->txq, skb); | in queue_process() |
| 117 | HARD_TX_UNLOCK(dev, txq); | in queue_process() |
| 123 | HARD_TX_UNLOCK(dev, txq); | in queue_process() |
| 335 | struct netdev_queue *txq; | in __netpoll_send_skb() (local) |
| 342 | if (HARD_TX_TRYLOCK(dev, txq)) { | in __netpoll_send_skb() |
| 343 | if (!netif_xmit_stopped(txq)) | in __netpoll_send_skb() |
| 346 | HARD_TX_UNLOCK(dev, txq); | in __netpoll_send_skb() |
| 798 | skb_queue_purge(&npinfo->txq); | in rcu_cleanup_netpoll_info() |
| [all …] |
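Netpoll transmits from contexts that must not block, so __netpoll_send_skb() only trylocks the queue; on contention or a stopped queue the skb is deferred to npinfo->txq (an sk_buff_head, despite sharing the name). A simplified sketch with an assumed helper name:

```c
/* Sketch of netpoll's non-blocking transmit attempt: trylock the
 * queue, send only if it is not stopped, and let NETDEV_TX_BUSY tell
 * the caller to defer the skb.  Hypothetical helper name, simplified
 * from __netpoll_send_skb().
 */
static netdev_tx_t netpoll_try_xmit(struct sk_buff *skb,
				    struct net_device *dev,
				    struct netdev_queue *txq)
{
	netdev_tx_t ret = NETDEV_TX_BUSY;

	if (HARD_TX_TRYLOCK(dev, txq)) {
		if (!netif_xmit_stopped(txq))
			ret = netdev_start_xmit(skb, dev, txq, false);
		HARD_TX_UNLOCK(dev, txq);
	}
	return ret;	/* busy: caller requeues for later processing */
}
```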
|
| dev.c |
| 3053 | if (txq->sb_dev) | in netdev_unbind_all_sb_channels() |
| 3114 | txq->sb_dev = NULL; | in netdev_unbind_sb_channel() |
| 3176 | if (txq < 1 || txq > dev->num_tx_queues) | in netif_set_real_num_tx_queues() |
| 3184 | txq); | in netif_set_real_num_tx_queues() |
| 3258 | if (txq < 1 || txq > dev->num_tx_queues || | in netif_set_real_num_queues() |
| 4672 | if (!txq) | in __dev_queue_xmit() |
| 4749 | struct netdev_queue *txq; | in __dev_direct_xmit() (local) |
| 4770 | HARD_TX_UNLOCK(dev, txq); | in __dev_direct_xmit() |
| 5406 | struct netdev_queue *txq; | in generic_xdp_tx() (local) |
| 5418 | HARD_TX_UNLOCK(dev, txq); | in generic_xdp_tx() |
| [all …] |
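Note that in netif_set_real_num_tx_queues() the txq identifier is a queue count, not a pointer: the active count may change at runtime but must stay within the range allocated at registration, which is the bound checked at line 3176. A hypothetical driver fragment using that API:

```c
/* Hypothetical driver fragment: resize the active TX queue count.
 * netif_set_real_num_tx_queues() itself rejects values outside
 * [1, dev->num_tx_queues], matching the check at line 3176 above.
 */
static int example_set_tx_channels(struct net_device *dev, unsigned int n)
{
	if (n < 1 || n > dev->num_tx_queues)
		return -EINVAL;

	return netif_set_real_num_tx_queues(dev, n);
}
```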
|
| dev.h |
| 76 | unsigned int txq); | |
| 80 | unsigned int txq) {} | in net_shaper_set_real_num_tx_queues() (argument) |
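The declaration/empty-stub pair above is the usual idiom for subsystems that can be compiled out, so callers need no #ifdef of their own. Sketched below with the guarding config symbol assumed to be CONFIG_NET_SHAPER:

```c
/* The "real prototype vs. empty inline stub" idiom suggested by the
 * pair above.  The CONFIG_NET_SHAPER guard is an assumption; callers
 * compile unchanged whichever branch is active.
 */
#if IS_ENABLED(CONFIG_NET_SHAPER)
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq);
#else
static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev,
						     unsigned int txq) {}
#endif
```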
|
| netdev-genl.c |
| 395 | struct netdev_queue *txq; | in netdev_nl_queue_fill_one() (local) |
| 425 | txq = netdev_get_tx_queue(netdev, q_idx); | in netdev_nl_queue_fill_one() |
| 426 | if (nla_put_napi_id(rsp, txq->napi)) | in netdev_nl_queue_fill_one() |
| 429 | if (txq->pool) | in netdev_nl_queue_fill_one() |
|
| pktgen.c |
| 3515 | struct netdev_queue *txq; | in pktgen_xmit() (local) |
| 3634 | txq = skb_get_tx_queue(odev, pkt_dev->skb); | in pktgen_xmit() |
| 3638 | HARD_TX_LOCK(odev, txq, smp_processor_id()); | in pktgen_xmit() |
| 3640 | if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { | in pktgen_xmit() |
| 3648 | ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0); | in pktgen_xmit() |
| 3659 | if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq)) | in pktgen_xmit() |
| 3681 | HARD_TX_UNLOCK(odev, txq); | in pktgen_xmit() |
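pktgen holds the queue lock across a whole burst and passes `--burst > 0` as netdev_start_xmit()'s xmit_more hint, letting the driver defer doorbell writes until the last packet. A simplified sketch; pktgen's up-front skb refcounting and retry accounting are omitted:

```c
/* Simplified sketch of pktgen's burst loop: one lock acquisition for
 * the whole burst, with the xmit_more hint (last argument) telling
 * the driver whether another packet follows immediately.
 */
static void burst_xmit(struct sk_buff *skb, struct net_device *dev,
		       struct netdev_queue *txq, int burst)
{
	HARD_TX_LOCK(dev, txq, smp_processor_id());
	while (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq)) {
		/* --burst > 0 is true for every packet but the last */
		if (netdev_start_xmit(skb, dev, txq, --burst > 0) != NETDEV_TX_OK)
			break;
	}
	HARD_TX_UNLOCK(dev, txq);
}
```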
|
| /net/mac80211/ |
| tx.c |
| 1315 | txq = sta->sta.txq[IEEE80211_NUM_TIDS]; | in ieee80211_get_txq() |
| 1323 | txq = sta->sta.txq[tid]; | in ieee80211_get_txq() |
| 1325 | txq = vif->txq; | in ieee80211_get_txq() |
| 1328 | if (!txq) | in ieee80211_get_txq() |
| 1508 | sdata->vif.txq = &txqi->txq; | in ieee80211_txq_init() |
| 1533 | sta->sta.txq[tid] = &txqi->txq; | in ieee80211_txq_init() |
| 3400 | struct ieee80211_txq *txq = sta->sta.txq[tid]; | in ieee80211_amsdu_aggregate() (local) |
| 3428 | if (!txq) | in ieee80211_amsdu_aggregate() |
| 3801 | struct txq_info *txqi = container_of(txq, struct txq_info, txq); | in ieee80211_tx_dequeue() |
| 3861 | if (txq->sta) { | in ieee80211_tx_dequeue() |
| [all …] |
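ieee80211_get_txq() picks a queue in a fixed order: a station's extra slot at index IEEE80211_NUM_TIDS (used for management frames), the station's per-TID queue, or the vif's own queue; a NULL result sends the frame down the ordinary path. A sketch of that order with the surrounding preconditions simplified:

```c
/* Sketch of the selection order visible in ieee80211_get_txq():
 * management frames use the extra slot at IEEE80211_NUM_TIDS, data
 * frames the per-TID slot, frames without a station the vif queue.
 * Simplified; the real function checks more preconditions first.
 */
#include <net/mac80211.h>

static struct ieee80211_txq *pick_txq(struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta,
				      u8 tid, bool is_mgmt)
{
	if (sta)
		return is_mgmt ? sta->txq[IEEE80211_NUM_TIDS]
			       : sta->txq[tid];

	return vif->txq;	/* may be NULL: caller uses normal path */
}
```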
|
| agg-tx.c |
| 183 | struct ieee80211_txq *txq = sta->sta.txq[tid]; | in ieee80211_agg_stop_txq() (local) |
| 188 | if (!txq) | in ieee80211_agg_stop_txq() |
| 191 | txqi = to_txq_info(txq); | in ieee80211_agg_stop_txq() |
| 192 | sdata = vif_to_sdata(txq->vif); | in ieee80211_agg_stop_txq() |
| 204 | struct ieee80211_txq *txq = sta->sta.txq[tid]; | in ieee80211_agg_start_txq() (local) |
| 209 | if (!txq) | in ieee80211_agg_start_txq() |
| 212 | txqi = to_txq_info(txq); | in ieee80211_agg_start_txq() |
| 975 | struct ieee80211_txq *txq; | in ieee80211_process_addba_resp() (local) |
| 994 | txq = sta->sta.txq[tid]; | in ieee80211_process_addba_resp() |
| 995 | if (!amsdu && txq) | in ieee80211_process_addba_resp() |
| [all …] |
|
| sta_info.c |
| 120 | for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { | in ieee80211_purge_sta_txqs() |
| 123 | if (!sta->sta.txq[i]) | in ieee80211_purge_sta_txqs() |
| 126 | txqi = to_txq_info(sta->sta.txq[i]); | in ieee80211_purge_sta_txqs() |
| 486 | kfree(to_txq_info(sta->sta.txq[0])); | in sta_info_free() |
| 686 | struct txq_info *txq = txq_data + i * size; | in __sta_info_alloc() (local) |
| 689 | ieee80211_txq_init(sdata, sta, txq, i); | in __sta_info_alloc() |
| 758 | kfree(to_txq_info(sta->sta.txq[0])); | in __sta_info_alloc() |
| 1796 | if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i])) | in ieee80211_sta_ps_deliver_wakeup() |
| 2215 | if (!sta->sta.txq[tid] || | in ieee80211_sta_ps_deliver_response() |
| 2217 | txq_has_queue(sta->sta.txq[tid])) | in ieee80211_sta_ps_deliver_response() |
| [all …] |
|
| util.c |
| 304 | struct ieee80211_txq *txq) | in ieee80211_handle_wake_tx_queue() (argument) |
| 313 | ieee80211_txq_schedule_start(hw, txq->ac); | in ieee80211_handle_wake_tx_queue() |
| 318 | ieee80211_txq_schedule_end(hw, txq->ac); | in ieee80211_handle_wake_tx_queue() |
| 346 | for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { | in __ieee80211_wake_txqs() |
| 347 | struct ieee80211_txq *txq = sta->sta.txq[i]; | in __ieee80211_wake_txqs() (local) |
| 349 | if (!txq) | in __ieee80211_wake_txqs() |
| 352 | txqi = to_txq_info(txq); | in __ieee80211_wake_txqs() |
| 354 | if (ac != txq->ac) | in __ieee80211_wake_txqs() |
| 367 | if (!vif->txq) | in __ieee80211_wake_txqs() |
| 370 | txqi = to_txq_info(vif->txq); | in __ieee80211_wake_txqs() |
| [all …] |
|
| driver-ops.h |
| 1380 | struct txq_info *txq) | in drv_wake_tx_queue() (argument) |
| 1382 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); | in drv_wake_tx_queue() |
| 1386 | set_bit(IEEE80211_TXQ_DIRTY, &txq->flags); | in drv_wake_tx_queue() |
| 1393 | trace_drv_wake_tx_queue(local, sdata, txq); | in drv_wake_tx_queue() |
| 1394 | local->ops->wake_tx_queue(&local->hw, &txq->txq); | in drv_wake_tx_queue() |
| 1400 | ieee80211_schedule_txq(&local->hw, &txqi->txq); | in schedule_and_wake_txq() |
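The drv_*() wrappers in driver-ops.h all follow one shape: resolve the owning interface, fire the tracepoint, then call into the driver's ops table. A reduced reconstruction of drv_wake_tx_queue() from the matches above; the sdata-in-driver check and the IEEE80211_TXQ_DIRTY handling are dropped:

```c
/* Reduced reconstruction of drv_wake_tx_queue(): the trace-then-call
 * shape common to mac80211's driver-op wrappers.  Omits the
 * sdata-in-driver check and DIRTY-flag handling visible above.
 */
static inline void drv_wake_tx_queue(struct ieee80211_local *local,
				     struct txq_info *txq)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);

	trace_drv_wake_tx_queue(local, sdata, txq);
	local->ops->wake_tx_queue(&local->hw, &txq->txq);
}
```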
|
| tdls.c |
| 278 | struct ieee80211_tx_queue_params *txq; | in ieee80211_tdls_add_wmm_param_ie() (local) |
| 299 | txq = &sdata->deflink.tx_conf[ieee80211_ac_from_wmm(i)]; | in ieee80211_tdls_add_wmm_param_ie() |
| 300 | wmm->ac[i].aci_aifsn = ieee80211_wmm_aci_aifsn(txq->aifs, | in ieee80211_tdls_add_wmm_param_ie() |
| 301 | txq->acm, i); | in ieee80211_tdls_add_wmm_param_ie() |
| 302 | wmm->ac[i].cw = ieee80211_wmm_ecw(txq->cw_min, txq->cw_max); | in ieee80211_tdls_add_wmm_param_ie() |
| 303 | wmm->ac[i].txop_limit = cpu_to_le16(txq->txop); | in ieee80211_tdls_add_wmm_param_ie() |
|
| debugfs_sta.c |
| 157 | for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { | in sta_aqm_read() |
| 158 | if (!sta->sta.txq[i]) | in sta_aqm_read() |
| 160 | txqi = to_txq_info(sta->sta.txq[i]); | in sta_aqm_read() |
| 163 | txqi->txq.tid, | in sta_aqm_read() |
| 164 | txqi->txq.ac, | in sta_aqm_read() |
|
| ieee80211_i.h |
| 966 | struct ieee80211_txq txq; | (member) |
| 1871 | static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq) | in to_txq_info() (argument) |
| 1873 | return container_of(txq, struct txq_info, txq); | in to_txq_info() |
| 1876 | static inline bool txq_has_queue(struct ieee80211_txq *txq) | in txq_has_queue() (argument) |
| 1878 | struct txq_info *txqi = to_txq_info(txq); | in txq_has_queue() |
| 2597 | struct txq_info *txq, int tid); | |
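These matches show the embed-and-recover idiom behind mac80211's txq handling: the driver-visible struct ieee80211_txq is a member of the private struct txq_info (line 966), and container_of() maps the public pointer back to the wrapper (line 1873). The general shape, with the private fields elided:

```c
/* The embed-and-recover idiom from the matches above: the public
 * struct is embedded in the private wrapper, and container_of()
 * recovers the wrapper from a pointer to the member.
 */
struct txq_info {
	/* ... mac80211-private queue state ... */
	struct ieee80211_txq txq;	/* handed out to drivers */
};

static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq)
{
	return container_of(txq, struct txq_info, txq);
}
```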
|
| trace.h |
| 2378 | struct txq_info *txq), | |
| 2380 | TP_ARGS(local, sdata, txq), | |
| 2391 | struct ieee80211_sta *sta = txq->txq.sta; | |
| 2396 | __entry->ac = txq->txq.ac; | |
| 2397 | __entry->tid = txq->txq.tid; | |
|
| ht.c |
| 394 | struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); | in ieee80211_ba_session_work() |
| 396 | vif_to_sdata(txqi->txq.vif); | in ieee80211_ba_session_work() |
|
| debugfs_netdev.c |
| 622 | if (!sdata->vif.txq) | in ieee80211_if_fmt_aqm() |
| 625 | txqi = to_txq_info(sdata->vif.txq); | in ieee80211_if_fmt_aqm() |
| 634 | txqi->txq.ac, | in ieee80211_if_fmt_aqm() |
|
| iface.c |
| 665 | if (sdata->vif.txq) | in ieee80211_do_stop() |
| 666 | ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq)); | in ieee80211_do_stop() |
| 2326 | if (sdata->vif.txq) | in ieee80211_if_remove() |
| 2327 | ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq)); | in ieee80211_if_remove() |
|
| /net/xfrm/ |
| xfrm_device.c |
| 471 | struct netdev_queue *txq; | in xfrm_dev_resume() (local) |
| 476 | txq = netdev_core_pick_tx(dev, skb, NULL); | in xfrm_dev_resume() |
| 478 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | in xfrm_dev_resume() |
| 479 | if (!netif_xmit_frozen_or_stopped(txq)) | in xfrm_dev_resume() |
| 480 | skb = dev_hard_start_xmit(skb, dev, txq, &ret); | in xfrm_dev_resume() |
| 481 | HARD_TX_UNLOCK(dev, txq); | in xfrm_dev_resume() |
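These matches spell out the canonical direct-transmit sequence: pick a queue for the skb, take the hard TX lock, send only if the queue is neither frozen nor stopped, then unlock. Stitched together as one fragment; xfrm_dev_resume()'s surrounding resume/requeue logic is omitted:

```c
/* The direct-transmit sequence reconstructed from the matches above.
 * A non-NULL return means the driver did not consume the skb and the
 * caller must drop or requeue it.
 */
static struct sk_buff *pick_and_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct netdev_queue *txq = netdev_core_pick_tx(dev, skb, NULL);
	int ret = NETDEV_TX_BUSY;

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	return skb;
}
```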
|
| /net/caif/ |
| caif_dev.c |
| 174 | struct netdev_queue *txq; | in transmit() (local) |
| 194 | txq = netdev_get_tx_queue(skb->dev, 0); | in transmit() |
| 195 | sch = rcu_dereference_bh(txq->qdisc); | in transmit() |
|
| /net/tipc/ |
| msg.c |
| 206 | int mss, struct sk_buff_head *txq) | in tipc_msg_append() (argument) |
| 213 | skb = skb_peek_tail(txq); | in tipc_msg_append() |
| 228 | __skb_queue_tail(txq, skb); | in tipc_msg_append() |
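tipc_msg_append() does Nagle-style bundling: peek at the send queue's tail skb, fill it further while there is room, and only then allocate and queue a fresh one. A generic sketch of that peek-or-append pattern; the helper name and buffer sizing are assumptions, and locking (the socket lock in TIPC's case) is the caller's job:

```c
/* Generic peek-or-append sketch of the bundling pattern used by
 * tipc_msg_append(): reuse the tail skb while it has room, otherwise
 * queue a new one.  Helper name and sizing are assumptions; caller
 * must hold whatever lock protects txq.
 */
#include <linux/skbuff.h>

static int txq_append(struct sk_buff_head *txq, const void *data,
		      int len, int bufsz)
{
	struct sk_buff *skb = skb_peek_tail(txq);

	if (!skb || skb_tailroom(skb) < len) {
		skb = alloc_skb(bufsz, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		__skb_queue_tail(txq, skb);
	}
	skb_put_data(skb, data, len);
	return len;
}
```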
|
| socket.c |
| 1272 | struct sk_buff_head *txq = &tsk->sk.sk_write_queue; | in tipc_sk_push_backlog() (local) |
| 1273 | struct sk_buff *skb = skb_peek_tail(txq); | in tipc_sk_push_backlog() |
| 1279 | tsk->pkt_cnt += skb_queue_len(txq); | in tipc_sk_push_backlog() |
| 1309 | tsk->pkt_cnt += skb_queue_len(txq); | in tipc_sk_push_backlog() |
| 1312 | rc = tipc_node_xmit(net, txq, dnode, tsk->portid); | in tipc_sk_push_backlog() |
| 1557 | struct sk_buff_head *txq = &sk->sk_write_queue; | in __tipc_sendstream() (local) |
| 1592 | rc = tipc_msg_append(hdr, m, send, maxnagle, txq); | in __tipc_sendstream() |
| 1602 | tsk->pkt_cnt += skb_queue_len(txq); | in __tipc_sendstream() |
| 1604 | skb = skb_peek_tail(txq); | in __tipc_sendstream() |
| 1620 | trace_tipc_sk_sendstream(sk, skb_peek(txq), | in __tipc_sendstream() |
| [all …] |
|
| /net/batman-adv/ |
| mesh-interface.c |
| 714 | struct netdev_queue *txq, | in batadv_set_lockdep_class_one() (argument) |
| 717 | lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key); | in batadv_set_lockdep_class_one() |
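batadv_set_lockdep_class_one() has the callback signature expected by netdev_for_each_tx_queue(); batman-adv uses it to give every queue's _xmit_lock its own lockdep class, so transmits on a mesh interface stacked over other devices are not flagged as recursive locking. A sketch of the usual pairing; the wrapper function name is an assumption:

```c
/* The listed helper matches netdev_for_each_tx_queue()'s callback
 * signature; it re-keys each TX queue's _xmit_lock for lockdep.
 * The wrapper name below is an assumption.
 */
#include <linux/netdevice.h>

static struct lock_class_key batadv_netdev_xmit_lock_key;

static void batadv_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
}

static void batadv_set_lockdep_class(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
}
```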
|