Lines Matching refs:wcid

65 struct mt76_wcid *wcid; in mt76_tx_status_unlock() local
67 wcid = rcu_dereference(dev->wcid[cb->wcid]); in mt76_tx_status_unlock()
68 if (wcid) { in mt76_tx_status_unlock()
69 status.sta = wcid_to_sta(wcid); in mt76_tx_status_unlock()
70 if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) { in mt76_tx_status_unlock()
71 rs.rate_idx = wcid->rate; in mt76_tx_status_unlock()
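
The mt76_tx_status_unlock() entries above show the lookup pattern used throughout tx.c: the per-station mt76_wcid is fetched by index from the RCU-protected dev->wcid array and then mapped to its ieee80211_sta. Below is a minimal sketch of that pattern in kernel-style C; the demo_* names and the array size are illustrative stand-ins for the real mt76 structures, not the driver's own definitions.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#define DEMO_N_WCID 288	/* illustrative; the real array size is driver-defined */

struct demo_wcid {
	u16 idx;
	/* rate/status fields elided */
};

struct demo_dev {
	struct demo_wcid __rcu *wcid[DEMO_N_WCID];
};

/* Resolve a wcid index to its entry; the caller must hold rcu_read_lock(),
 * just as the status path does around dev->wcid[cb->wcid]. */
static struct demo_wcid *demo_wcid_lookup(struct demo_dev *dev, u16 idx)
{
	if (idx >= ARRAY_SIZE(dev->wcid))
		return NULL;

	return rcu_dereference(dev->wcid[idx]);
}
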
119 mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, in mt76_tx_status_skb_add() argument
128 if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx])) in mt76_tx_status_skb_add()
140 pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST, in mt76_tx_status_skb_add()
147 cb->wcid = wcid->idx; in mt76_tx_status_skb_add()
150 if (list_empty(&wcid->list)) in mt76_tx_status_skb_add()
151 list_add_tail(&wcid->list, &dev->wcid_list); in mt76_tx_status_skb_add()
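
mt76_tx_status_skb_add() is where a frame enters status tracking: a packet id is allocated for the skb from a per-wcid IDR, and the wcid is linked into dev->wcid_list the first time it has something pending. A reduced sketch of that allocation step follows; it redeclares demo_* types with only the fields this step needs, and DEMO_PACKET_ID_* are illustrative stand-ins for the driver's MT_PACKET_ID_* constants.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/skbuff.h>

#define DEMO_PACKET_ID_FIRST	0x80	/* illustrative lower bound */
#define DEMO_PACKET_ID_MASK	0xff	/* illustrative upper bound */

struct demo_wcid {
	u16 idx;
	struct idr pktid;		/* pending skbs, keyed by packet id */
	struct list_head list;		/* link into demo_dev.wcid_list */
};

struct demo_dev {
	struct list_head wcid_list;	/* wcids with pending status frames */
};

/* Hand out a packet id for @skb and make the wcid visible to the periodic
 * status check; mirrors the shape of mt76_tx_status_skb_add(). */
static int demo_status_skb_add(struct demo_dev *dev, struct demo_wcid *wcid,
			       struct sk_buff *skb)
{
	int pid;

	pid = idr_alloc(&wcid->pktid, skb, DEMO_PACKET_ID_FIRST,
			DEMO_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0)
		return pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

	return pid;
}

Keeping the IDR per wcid (rather than per device) means packet ids only have to stay unique per station, and an idle station costs nothing on the periodic check because it is simply absent from wcid_list.
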
161 mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, in mt76_tx_status_skb_get() argument
169 skb = idr_remove(&wcid->pktid, pktid); in mt76_tx_status_skb_get()
174 idr_for_each_entry(&wcid->pktid, skb, id) { in mt76_tx_status_skb_get()
189 idr_remove(&wcid->pktid, cb->pktid); in mt76_tx_status_skb_get()
195 if (idr_is_empty(&wcid->pktid)) in mt76_tx_status_skb_get()
196 list_del_init(&wcid->list); in mt76_tx_status_skb_get()
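
mt76_tx_status_skb_get() is the matching consumer: a non-negative pktid pulls exactly one skb out of the IDR, a negative pktid flushes everything still pending, and once the IDR runs empty the wcid drops off dev->wcid_list again, which is what mt76_tx_status_check() relies on. A condensed sketch, reusing the demo_* types from the previous sketch:

/* Pull the skb registered under @pktid, or drain all pending skbs onto
 * @flush_list when @pktid is negative; shaped after mt76_tx_status_skb_get(). */
static struct sk_buff *demo_status_skb_get(struct demo_wcid *wcid, int pktid,
					   struct sk_buff_head *flush_list)
{
	struct sk_buff *skb = NULL;
	int id;

	if (pktid >= 0) {
		/* normal completion: the hardware reported this packet id */
		skb = idr_remove(&wcid->pktid, pktid);
	} else {
		/* flush/timeout path: drain every pending entry */
		struct sk_buff *tmp;

		idr_for_each_entry(&wcid->pktid, tmp, id) {
			idr_remove(&wcid->pktid, id);
			__skb_queue_tail(flush_list, tmp);
		}
	}

	/* nothing pending any more: stop polling this wcid */
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
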
205 struct mt76_wcid *wcid, *tmp; in mt76_tx_status_check() local
209 list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list) in mt76_tx_status_check()
210 mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list); in mt76_tx_status_check()
216 mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid, in mt76_tx_check_non_aql() argument
222 if (!wcid || info->tx_time_est) in mt76_tx_check_non_aql()
225 pending = atomic_dec_return(&wcid->non_aql_packets); in mt76_tx_check_non_aql()
227 atomic_cmpxchg(&wcid->non_aql_packets, pending, 0); in mt76_tx_check_non_aql()
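
mt76_tx_check_non_aql() only counts frames that carry no airtime estimate (the early return on info->tx_time_est skips AQL-accounted frames): on completion the per-wcid counter is decremented and, if it ever underflows, clamped back to zero. The clamp idiom in isolation, as a small standalone sketch:

#include <linux/atomic.h>

/* Decrement a pending non-AQL frame counter, clamping an underflow back to
 * zero the way mt76_tx_check_non_aql() treats wcid->non_aql_packets. */
static void demo_non_aql_dec(atomic_t *non_aql_packets)
{
	int pending;

	pending = atomic_dec_return(non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(non_aql_packets, pending, 0);
}
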
238 struct mt76_wcid *wcid = NULL; in __mt76_tx_complete_skb() local
244 if (wcid_idx < ARRAY_SIZE(dev->wcid)) in __mt76_tx_complete_skb()
245 wcid = rcu_dereference(dev->wcid[wcid_idx]); in __mt76_tx_complete_skb()
247 mt76_tx_check_non_aql(dev, wcid, skb); in __mt76_tx_complete_skb()
265 status.sta = wcid_to_sta(wcid); in __mt76_tx_complete_skb()
282 struct mt76_wcid *wcid, struct ieee80211_sta *sta, in __mt76_tx_queue_skb() argument
293 idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta); in __mt76_tx_queue_skb()
297 wcid = (struct mt76_wcid *)sta->drv_priv; in __mt76_tx_queue_skb()
298 q->entry[idx].wcid = wcid->idx; in __mt76_tx_queue_skb()
303 pending = atomic_inc_return(&wcid->non_aql_packets); in __mt76_tx_queue_skb()
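
__mt76_tx_queue_skb() is the producer side of that counter: after queueing a frame without an airtime estimate it bumps non_aql_packets and tells the burst loop to stop once a limit is reached. A one-function sketch, with the limit passed in rather than hard-coding the driver's MT_MAX_NON_AQL_PKT constant:

#include <linux/atomic.h>
#include <linux/types.h>

/* Account one queued non-AQL frame; returns true when the caller should
 * stop bursting, echoing the check in __mt76_tx_queue_skb(). */
static bool demo_non_aql_inc(atomic_t *non_aql_packets, int limit)
{
	return atomic_inc_return(non_aql_packets) >= limit;
}
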
312 struct mt76_wcid *wcid, struct sk_buff *skb) in mt76_tx() argument
337 if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET)) in mt76_tx()
345 __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); in mt76_tx()
372 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; in mt76_queue_ps_skb() local
381 __mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL); in mt76_queue_ps_skb()
438 struct mt76_txq *mtxq, struct mt76_wcid *wcid) in mt76_txq_send_burst() argument
449 if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) in mt76_txq_send_burst()
452 if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT) in mt76_txq_send_burst()
460 if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) in mt76_txq_send_burst()
465 idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop); in mt76_txq_send_burst()
482 if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) in mt76_txq_send_burst()
487 idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop); in mt76_txq_send_burst()
509 struct mt76_wcid *wcid; in mt76_txq_schedule_list() local
528 wcid = rcu_dereference(dev->wcid[mtxq->wcid]); in mt76_txq_schedule_list()
529 if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags)) in mt76_txq_schedule_list()
544 n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid); in mt76_txq_schedule_list()
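
On the scheduling side (mt76_txq_schedule_list() and mt76_txq_send_burst()), the wcid index stored in the mt76_txq is resolved back through dev->wcid under RCU, and the queue is skipped while the station is in powersave or already has the maximum number of non-AQL frames in flight. A condensed sketch of those gating checks, again with hypothetical demo_* names and a made-up DEMO_MAX_NON_AQL_PKT in place of MT_MAX_NON_AQL_PKT:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_WCID_FLAG_PS	0	/* bit number; stands in for MT_WCID_FLAG_PS */
#define DEMO_MAX_NON_AQL_PKT	16	/* illustrative limit */

struct demo_sched_wcid {
	unsigned long flags;
	atomic_t non_aql_packets;
};

/* Gate a queue on its station state before bursting, mirroring the
 * early-exit checks in mt76_txq_send_burst()/mt76_txq_schedule_list(). */
static bool demo_wcid_may_tx(struct demo_sched_wcid *wcid)
{
	if (!wcid)
		return false;

	if (test_bit(DEMO_WCID_FLAG_PS, &wcid->flags))
		return false;

	if (atomic_read(&wcid->non_aql_packets) >= DEMO_MAX_NON_AQL_PKT)
		return false;

	return true;
}

Deferring powersave and non-AQL throttling to schedule time keeps __mt76_tx_queue_skb() itself cheap; the burst loop simply stops pulling frames for a wcid that fails these checks.
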