| /net/sched/ |
| sch_multiq.c |
|   25  struct Qdisc **queues;  (member)
|   54  return q->queues[0];  in multiq_classify()
|   56  return q->queues[band];  in multiq_classify()
|  166  qdisc_put(q->queues[band]);  in multiq_destroy()
|  168  kfree(q->queues);  in multiq_destroy()
|  220  old = q->queues[i];  in multiq_tune()
|  221  q->queues[i] = child;  in multiq_tune()
|  241  q->queues = NULL;  in multiq_init()
|  253  if (!q->queues)  in multiq_init()
|  299  return q->queues[band];  in multiq_leaf()
|  [all …]
|
| sch_prio.c |
|   26  struct Qdisc *queues[TCQ_PRIO_BANDS];  (member)
|   63  return q->queues[q->prio2band[0]];  in prio_classify()
|   65  return q->queues[band];  in prio_classify()
|  137  qdisc_reset(q->queues[prio]);  in prio_reset()
|  173  qdisc_put(q->queues[prio]);  in prio_destroy()
|  201  if (!queues[i]) {  in prio_tune()
|  203  qdisc_put(queues[--i]);  in prio_tune()
|  217  q->queues[i] = queues[i];  in prio_tune()
|  225  qdisc_put(q->queues[i]);  in prio_tune()
|  322  return q->queues[band];  in prio_leaf()
|  [all …]
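The hits in sch_multiq.c and sch_prio.c above all revolve around the same shape: an array of child qdiscs indexed by a band number, which the classify path uses to pick the child for each packet. The standalone C sketch below illustrates that band-array pattern; NUM_BANDS, NUM_PRIOS, child_queue, prio_sched and classify() are assumed illustrative names, not kernel identifiers, and the fallback logic is a simplification of prio_classify().

```c
#include <stdio.h>

#define NUM_BANDS 3   /* illustrative; the real array is sized TCQ_PRIO_BANDS */
#define NUM_PRIOS 8   /* illustrative number of priority values */

struct child_queue { const char *name; };

struct prio_sched {
    int prio2band[NUM_PRIOS];               /* priority -> band lookup  */
    struct child_queue *queues[NUM_BANDS];  /* one child queue per band */
};

/* Loosely mirrors prio_classify(): an out-of-range priority falls back to
 * the band mapped from priority 0, otherwise the band array is indexed. */
static struct child_queue *classify(struct prio_sched *q, int prio)
{
    if (prio < 0 || prio >= NUM_PRIOS)
        return q->queues[q->prio2band[0]];
    return q->queues[q->prio2band[prio]];
}

int main(void)
{
    struct child_queue high = { "band 0 (highest)" };
    struct child_queue mid  = { "band 1" };
    struct child_queue low  = { "band 2 (lowest)" };
    struct prio_sched q = {
        .prio2band = { 1, 2, 2, 2, 1, 2, 0, 0 },   /* made-up mapping */
        .queues    = { &high, &mid, &low },
    };

    printf("prio 6  -> %s\n", classify(&q, 6)->name);   /* -> band 0   */
    printf("prio 1  -> %s\n", classify(&q, 1)->name);   /* -> band 2   */
    printf("prio 99 -> %s\n", classify(&q, 99)->name);  /* -> fallback */
    return 0;
}
```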
|
| sch_ets.c |
|  585  struct Qdisc *queues[TCQ_ETS_MAX_BANDS];  in ets_qdisc_change()  (local)
|  642  queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,  in ets_qdisc_change()
|  645  if (!queues[i]) {  in ets_qdisc_change()
|  647  qdisc_put(queues[--i]);  in ets_qdisc_change()
|  674  q->classes[i].qdisc = queues[i];  in ets_qdisc_change()
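The ets_qdisc_change() hits (and the prio_tune() ones above) show a common setup idiom: create one child queue per band, and if any creation fails, release the children built so far by walking the index back down. The sketch below reproduces that unwind pattern with plain malloc/free stand-ins; setup_bands() and create_child() are assumed names, and qdisc_create_dflt()/qdisc_put() are only mirrored, not called.

```c
#include <stdio.h>
#include <stdlib.h>

#define NBANDS 8   /* illustrative; the listing uses TCQ_ETS_MAX_BANDS */

struct child { int band; };

/* Stand-in for qdisc_create_dflt(): may fail and return NULL. */
static struct child *create_child(int band)
{
    struct child *c = malloc(sizeof(*c));
    if (c)
        c->band = band;
    return c;
}

/* Create one child per band; on failure, release the ones created so far
 * in reverse order, mirroring `qdisc_put(queues[--i])` in the hits above. */
static int setup_bands(struct child *queues[], int nbands)
{
    int i;

    for (i = 0; i < nbands; i++) {
        queues[i] = create_child(i);
        if (!queues[i]) {
            while (i > 0)
                free(queues[--i]);
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    struct child *queues[NBANDS];
    int i;

    if (setup_bands(queues, NBANDS) == 0) {
        printf("created %d child queues\n", NBANDS);
        for (i = 0; i < NBANDS; i++)
            free(queues[i]);
    }
    return 0;
}
```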
|
| Kconfig |
|  84  to support devices that have multiple hardware transmit queues.
|
| /net/mac80211/ |
| util.c |
|  440  if (WARN_ON(queue >= hw->queues))  in __ieee80211_wake_queue()
|  506  if (WARN_ON(queue >= hw->queues))  in __ieee80211_stop_queue()
|  588  for (i = 0; i < hw->queues; i++)  in ieee80211_add_pending_skbs()
|  596  unsigned long queues,  in ieee80211_stop_queues_by_reason()  (argument)
|  606  for_each_set_bit(i, &queues, hw->queues)  in ieee80211_stop_queues_by_reason()
|  638  unsigned long queues,  in ieee80211_wake_queues_by_reason()  (argument)
|  648  for_each_set_bit(i, &queues, hw->queues)  in ieee80211_wake_queues_by_reason()
|  666  unsigned int queues;  in ieee80211_get_vif_queues()  (local)
|  671  queues = 0;  in ieee80211_get_vif_queues()
|  680  queues = BIT(local->hw.queues) - 1;  in ieee80211_get_vif_queues()
|  [all …]
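Several of the util.c hits treat `queues` as a bitmask with one bit per hardware queue: `BIT(local->hw.queues) - 1` selects every queue, and the stop/wake helpers walk the mask with for_each_set_bit(). The standalone sketch below shows the same convention with an ordinary loop; the BIT() macro here and stop_one_queue() are illustrative stand-ins, not mac80211 API.

```c
#include <stdio.h>

#define BIT(n) (1UL << (n))

static void stop_one_queue(unsigned int q)
{
    printf("stopping queue %u\n", q);
}

/* Walk every set bit in the mask, capped at the number of hardware queues;
 * the kernel does the same with for_each_set_bit(i, &queues, hw->queues). */
static void stop_queues_by_mask(unsigned long queues, unsigned int n_queues)
{
    unsigned int i;

    for (i = 0; i < n_queues; i++)
        if (queues & BIT(i))
            stop_one_queue(i);
}

int main(void)
{
    unsigned int hw_queues = 4;

    /* BIT(hw->queues) - 1 selects all queues, as in ieee80211_get_vif_queues(). */
    unsigned long all_queues = BIT(hw_queues) - 1;

    stop_queues_by_mask(all_queues, hw_queues);   /* queues 0..3  */
    stop_queues_by_mask(BIT(2), hw_queues);       /* just queue 2 */
    return 0;
}
```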
|
| debugfs.c |
|  608  for (q = 0; q < local->hw.queues; q++)  in queues_read()
|  617  DEBUGFS_READONLY_FILE_OPS(queues);
|  688  DEBUGFS_ADD(queues);  in debugfs_hw_add()
|
| main.c |
|   937  local->hw.queues = 1;  in ieee80211_alloc_hw_nm()
|  1119  local->hw.offchannel_tx_hw_queue >= local->hw.queues))  in ieee80211_register_hw()
|  1307  if (WARN_ON(local->hw.queues < IEEE80211_NUM_ACS &&  in ieee80211_register_hw()
|  1472  if (hw->queues > IEEE80211_MAX_QUEUES)  in ieee80211_register_hw()
|  1473  hw->queues = IEEE80211_MAX_QUEUES;  in ieee80211_register_hw()
|
| wme.c |
|  129  local->hw.queues < IEEE80211_NUM_ACS)  in ieee80211_select_queue_80211()
|
| trace.h |
|  1262  u32 queues, bool drop),
|  1264  TP_ARGS(local, queues, drop),
|  1269  __field(u32, queues)
|  1275  __entry->queues = queues;
|  1280  LOCAL_PR_ARG, __entry->queues, __entry->drop
|
| driver-ops.h |
|  719  u32 queues, bool drop)  in drv_flush()  (argument)
|  732  trace_drv_flush(local, queues, drop);  in drv_flush()
|  734  local->ops->flush(&local->hw, vif, queues, drop);  in drv_flush()
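drv_flush() is one of mac80211's thin driver-op wrappers: it records a tracepoint and then dispatches to the driver's callback through a function pointer. A minimal sketch of that wrapper shape, using assumed stand-ins (driver_ops, trace_flush, drv_flush_sketch) rather than the real struct ieee80211_ops:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the driver callback table. */
struct driver_ops {
    void (*flush)(unsigned int queues, bool drop);   /* optional callback */
};

/* Stand-in for the trace_drv_flush() tracepoint. */
static void trace_flush(unsigned int queues, bool drop)
{
    printf("trace: flush queues=0x%x drop=%d\n", queues, drop);
}

/* Wrapper shape: trace first, then call through the function pointer
 * only if the driver actually provides the op. */
static void drv_flush_sketch(const struct driver_ops *ops,
                             unsigned int queues, bool drop)
{
    trace_flush(queues, drop);
    if (ops->flush)
        ops->flush(queues, drop);
}

static void fake_driver_flush(unsigned int queues, bool drop)
{
    printf("driver: flushing queues 0x%x%s\n", queues, drop ? " (drop)" : "");
}

int main(void)
{
    struct driver_ops ops = { .flush = fake_driver_flush };

    drv_flush_sketch(&ops, 0xf, false);
    return 0;
}
```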
|
| ieee80211_i.h |
|  2495  unsigned long queues,
|  2499  unsigned long queues,
|  2553  unsigned int queues, bool drop);
|
| iface.c |
|   402  int n_queues = sdata->local->hw.queues;  in ieee80211_check_queues()
|   899  if (local->hw.queues < IEEE80211_NUM_ACS)  in ieee80211_monitor_select_queue()
|  1140  else if (local->hw.queues >= IEEE80211_NUM_ACS)  in ieee80211_set_default_queues()
|
| tx.c |
|  1676  if (WARN_ON_ONCE(q >= local->hw.queues)) {  in ieee80211_tx_frags()
|  4776  for (i = 0; i < local->hw.queues; i++) {  in ieee80211_clear_tx_pending()
|  4850  for (i = 0; i < local->hw.queues; i++) {  in ieee80211_tx_pending()
|  6049  u32 queues;  in ieee80211_reserve_tid()  (local)
|  6090  queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]);  in ieee80211_reserve_tid()
|  6091  __ieee80211_flush_queues(local, sdata, queues, false);  in ieee80211_reserve_tid()
|  6296  if (local->hw.queues < IEEE80211_NUM_ACS)  in ieee80211_tx_control_port()
|
| ibss.c |
|   198  if (local->hw.queues >= IEEE80211_NUM_ACS)  in ieee80211_ibss_build_presp()
|  1007  local->hw.queues >= IEEE80211_NUM_ACS) {  in ieee80211_update_sta_info()
|
| tdls.c |
|  409  if (local->hw.queues >= IEEE80211_NUM_ACS &&  in ieee80211_tdls_add_setup_start_ies()
|  630  if (local->hw.queues >= IEEE80211_NUM_ACS && sta->sta.wme)  in ieee80211_tdls_add_setup_cfm_ies()
|
| mlme.c |
|  3431  for (q = 0; q < local->hw.queues; q++) {  in ieee80211_dynamic_ps_enable_work()
|  3500  if (local->hw.queues < IEEE80211_NUM_ACS)  in __ieee80211_sta_handle_tspec_ac_params()
|  3631  if (local->hw.queues < IEEE80211_NUM_ACS)  in _ieee80211_sta_wmm_params()
|  6286  local->hw.queues >= IEEE80211_NUM_ACS;  in ieee80211_assoc_success()
|  9260  wmm_used = bss->wmm_used && (local->hw.queues >= IEEE80211_NUM_ACS);  in ieee80211_mgd_auth()
|  9705  (local->hw.queues >= IEEE80211_NUM_ACS);  in ieee80211_mgd_assoc()
|
| cfg.c |
|  2112  local->hw.queues >= IEEE80211_NUM_ACS)  in sta_apply_parameters()
|  2950  if (local->hw.queues < IEEE80211_NUM_ACS)  in ieee80211_set_txq_params()
|
| /net/core/ |
| dev.c |
|  2670  if (map->queues[pos] != index)  in remove_xps_queue()
|  2674  map->queues[pos] = map->queues[--map->len];  in remove_xps_queue()
|  2777  if (map->queues[pos] != index)  in expand_xps_map()
|  2802  new_map->queues[i] = map->queues[i];  in expand_xps_map()
|  2927  while ((pos < map->len) && (map->queues[pos] != index))  in __netif_set_xps_queue()
|  2931  map->queues[map->len++] = index;  in __netif_set_xps_queue()
|  4484  queue_index = map->queues[0];  in __get_xps_queue_idx()
|  4486  queue_index = map->queues[reciprocal_scale(  in __get_xps_queue_idx()
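The dev.c hits outline the two core XPS map operations: picking a transmit queue from a small per-CPU array (queues[0] when there is a single entry, otherwise queues[reciprocal_scale(hash, len)]) and removing an entry by overwriting it with the last one. The self-contained sketch below reproduces both; struct xps_map_sketch, pick_queue() and remove_queue() are illustrative names, while reciprocal_scale() follows the kernel's multiply-and-shift formula.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct xps_map: a short list of queue indexes. */
struct xps_map_sketch {
    unsigned int len;
    uint16_t queues[8];
};

/* Same formula as the kernel's reciprocal_scale(): maps a 32-bit value
 * onto [0, ep_ro) without a division. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
    return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

/* Mirrors the __get_xps_queue_idx() hits: single entry is taken directly,
 * otherwise the flow hash is scaled onto the map. */
static int pick_queue(const struct xps_map_sketch *map, uint32_t hash)
{
    if (map->len == 0)
        return -1;
    if (map->len == 1)
        return map->queues[0];
    return map->queues[reciprocal_scale(hash, map->len)];
}

/* Remove one queue index by swapping in the last entry, O(1) as in
 * remove_xps_queue(). */
static void remove_queue(struct xps_map_sketch *map, uint16_t index)
{
    unsigned int pos;

    for (pos = 0; pos < map->len; pos++) {
        if (map->queues[pos] != index)
            continue;
        map->queues[pos] = map->queues[--map->len];
        break;
    }
}

int main(void)
{
    struct xps_map_sketch map = { .len = 3, .queues = { 4, 5, 6 } };

    printf("picked queue %d\n", pick_queue(&map, 0x9e3779b9u));
    remove_queue(&map, 5);
    printf("after removal: len=%u\n", map.len);
    return 0;
}
```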
|
| net-sysfs.c |
|  1746  if (map->queues[i] == index) {  in xps_queue_show()
|
| /net/ipv4/ |
| Kconfig |
|  673  maximize network utilization and minimize queues. It builds an explicit
|
| /net/netfilter/ |
| Kconfig |
|  1023  As opposed to QUEUE, it supports 65535 different queues,
|