1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4 */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include <net/page_pool.h>
8 #include "mt76.h"
9
/* Channel table entry initializers for the 2.4 / 5 / 6 GHz bands.
 * All entries start out with a 30 dBm max_power default.
 */
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN6G(_idx, _freq) { \
	.band = NL80211_BAND_6GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}
30
/* 2.4 GHz channel list: channels 1-14 (2412-2484 MHz). */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
47
/* 5 GHz channel list, grouped by regulatory sub-band. */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};
80
/* 6 GHz channel list: channels 1-233 in 20 MHz steps, grouped by UNII band. */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146
/* Throughput thresholds for the mac80211 tpt LED trigger: higher
 * throughput selects a shorter blink interval (in ms).
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
159
/* Legacy rate table: four CCK rates followed by eight OFDM rates.
 * Second macro argument is presumably the bitrate in 100 kbit/s units
 * (10 == 1 Mbit/s); first is the hardware rate index — TODO confirm
 * against the CCK_RATE/OFDM_RATE macro definitions in mt76.h.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175
/* Frequency ranges over which SAR power limits can be applied,
 * covering the 2.4, 5 and 6 GHz bands.  Published to userspace via
 * wiphy->sar_capa in mt76_phy_init().
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};

static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195
/* Register the per-phy LED class device with a mac80211 throughput
 * blink trigger.  LED pin and polarity are read from the optional
 * "led" device tree child node (primary phy only).  Returns 0 when
 * the driver provided no LED callbacks (nothing to register).
 */
static int mt76_led_init(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;

	/* no LED ops registered by the driver */
	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
		return 0;

	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
		 wiphy_name(hw->wiphy));

	phy->leds.cdev.name = phy->leds.name;
	phy->leds.cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
						 IEEE80211_TPT_LEDTRIG_FL_RADIO,
						 mt76_tpt_blink,
						 ARRAY_SIZE(mt76_tpt_blink));

	/* DT "led" configuration applies to the primary phy only */
	if (phy == &dev->phy) {
		struct device_node *np = dev->dev->of_node;

		np = of_get_child_by_name(np, "led");
		if (np) {
			int led_pin;

			if (!of_property_read_u32(np, "led-sources", &led_pin))
				phy->leds.pin = led_pin;
			phy->leds.al = of_property_read_bool(np,
							     "led-active-low");
			of_node_put(np);
		}
	}

	return led_classdev_register(dev->dev, &phy->leds.cdev);
}
231
mt76_led_cleanup(struct mt76_phy * phy)232 static void mt76_led_cleanup(struct mt76_phy *phy)
233 {
234 if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
235 return;
236
237 led_classdev_unregister(&phy->leds.cdev);
238 }
239
/* Program HT (and optionally VHT) RX MCS masks and STBC capability
 * bits according to the number of antennas set in phy->antenna_mask.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	/* TX STBC requires more than one spatial stream */
	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* one 0xff RX MCS mask byte per usable stream, zero for the rest */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* VHT MCS map: two bits per stream, MCS 0-9 for usable streams */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
			cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
281
/* Refresh stream capabilities on all supported bands, e.g. after the
 * antenna mask changed.  VHT is never advertised on 2.4 GHz.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	struct {
		bool enabled;
		struct ieee80211_supported_band *sband;
		bool vht;
	} bands[] = {
		{ phy->cap.has_2ghz, &phy->sband_2g.sband, false },
		{ phy->cap.has_5ghz, &phy->sband_5g.sband, vht },
		{ phy->cap.has_6ghz, &phy->sband_6g.sband, vht },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(bands); i++) {
		if (!bands[i].enabled)
			continue;

		mt76_init_stream_cap(phy, bands[i].sband, bands[i].vht);
	}
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
292
/* Initialize one supported band: duplicate the channel template into
 * device-managed memory, allocate per-channel state, attach the rate
 * table and fill in default HT/VHT capabilities.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* copy the const template so the per-phy list can be modified */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* per-stream MCS/STBC bits depend on the antenna mask */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
349
350 static int
mt76_init_sband_2g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)351 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
352 int n_rates)
353 {
354 phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
355
356 return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
357 ARRAY_SIZE(mt76_channels_2ghz), rates,
358 n_rates, true, false);
359 }
360
361 static int
mt76_init_sband_5g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates,bool vht)362 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 int n_rates, bool vht)
364 {
365 phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
366
367 return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
368 ARRAY_SIZE(mt76_channels_5ghz), rates,
369 n_rates, true, vht);
370 }
371
372 static int
mt76_init_sband_6g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)373 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 int n_rates)
375 {
376 phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
377
378 return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
379 ARRAY_SIZE(mt76_channels_6ghz), rates,
380 n_rates, false, false);
381 }
382
383 static void
mt76_check_sband(struct mt76_phy * phy,struct mt76_sband * msband,enum nl80211_band band)384 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
385 enum nl80211_band band)
386 {
387 struct ieee80211_supported_band *sband = &msband->sband;
388 bool found = false;
389 int i;
390
391 if (!sband)
392 return;
393
394 for (i = 0; i < sband->n_channels; i++) {
395 if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
396 continue;
397
398 found = true;
399 break;
400 }
401
402 if (found) {
403 phy->chandef.chan = &sband->channels[0];
404 phy->chan_state = &msband->chan[0];
405 return;
406 }
407
408 sband->n_channels = 0;
409 phy->hw->wiphy->bands[band] = NULL;
410 }
411
/* Common wiphy/hw initialization shared by the primary and any
 * secondary phys: feature flags, antenna info, SAR capabilities and
 * per-frequency-range power limit storage.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	/* storage for one power limit per SAR frequency range */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	/* keep a driver-provided fragment limit, if any */
	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	/* skip software A-MSDU aggregation when hardware offloads it */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
465
/* Allocate an extra (secondary) phy plus @size bytes of driver private
 * data behind it, bound to an existing mt76_dev.  Returns NULL on
 * allocation failure.  The caller registers it via mt76_register_phy().
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	/* driver private data follows the 8-byte aligned phy struct */
	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	/* same interface mode set as the primary phy in mt76_alloc_device() */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
499
/* Register a secondary phy with mac80211: initialize common wiphy
 * state, per-band channel/rate tables and optional LED support, then
 * register the hardware and publish the phy in dev->phys[].
 *
 * Fix: unregister the LED classdev if ieee80211_register_hw() fails,
 * so the classdev registered by mt76_led_init() is not leaked.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	/* 5/6 GHz skip the leading CCK entries of the rate table
	 * (see mt76_rates: four CCK rates, then OFDM)
	 */
	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret) {
		/* don't leak the LED classdev registered above */
		if (IS_ENABLED(CONFIG_MT76_LEDS))
			mt76_led_cleanup(phy);
		return ret;
	}

	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
547
mt76_unregister_phy(struct mt76_phy * phy)548 void mt76_unregister_phy(struct mt76_phy *phy)
549 {
550 struct mt76_dev *dev = phy->dev;
551
552 if (IS_ENABLED(CONFIG_MT76_LEDS))
553 mt76_led_cleanup(phy);
554 mt76_tx_status_check(dev, true);
555 ieee80211_unregister_hw(phy->hw);
556 dev->phys[phy->band_idx] = NULL;
557 }
558 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
559
/* Create a page pool for an RX queue.  Data queues get a larger pool
 * than control/MCU queues.  For MMIO devices the pool also handles DMA
 * mapping and sync.  Returns 0 or a negative errno; q->page_pool stays
 * NULL on failure.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_PAGE_FRAG,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* queue index derived from its position in the q_rx array */
	int idx = q - dev->q_rx;

	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
		/* high-traffic data queues */
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
600
/* Allocate the mt76_dev together with the primary phy and @size bytes
 * of driver private data, and initialize all locks, queues, token IDRs
 * and the ordered workqueue.  Returns NULL on allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* may be overridden later, e.g. for devices with a separate DMA dev */
	dev->dma_dev = pdev;

	/* the primary phy is embedded in the device on band 0 */
	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
675
/* Register the primary phy with mac80211 and start the tx worker.
 *
 * Fix: unregister the LED classdev if ieee80211_register_hw() fails,
 * so the classdev registered by mt76_led_init() is not leaked.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	/* 5/6 GHz skip the leading CCK entries of the rate table
	 * (see mt76_rates: four CCK rates, then OFDM)
	 */
	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		/* don't leak the LED classdev registered above */
		if (IS_ENABLED(CONFIG_MT76_LEDS))
			mt76_led_cleanup(phy);
		return ret;
	}

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
727
mt76_unregister_device(struct mt76_dev * dev)728 void mt76_unregister_device(struct mt76_dev *dev)
729 {
730 struct ieee80211_hw *hw = dev->hw;
731
732 if (IS_ENABLED(CONFIG_MT76_LEDS))
733 mt76_led_cleanup(&dev->phy);
734 mt76_tx_status_check(dev, true);
735 ieee80211_unregister_hw(hw);
736 }
737 EXPORT_SYMBOL_GPL(mt76_unregister_device);
738
mt76_free_device(struct mt76_dev * dev)739 void mt76_free_device(struct mt76_dev *dev)
740 {
741 mt76_worker_teardown(&dev->tx_worker);
742 if (dev->wq) {
743 destroy_workqueue(dev->wq);
744 dev->wq = NULL;
745 }
746 ieee80211_free_hw(dev->hw);
747 }
748 EXPORT_SYMBOL_GPL(mt76_free_device);
749
/* Finish an A-MSDU under reassembly on queue @q: validate its first
 * subframe and either drop the whole burst or hand it to the per-queue
 * RX skb list for further processing.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			/* decrypted but IV not stripped: skip the 8-byte
			 * security header still in front of the payload
			 */
			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP header where the DA should be => spoofed A-MSDU */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
785
/* Collect A-MSDU subframes arriving on queue @q into a frag_list on
 * the first subframe's skb, releasing the assembled burst once the
 * last subframe (or a non-A-MSDU frame) is seen.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* a new frame/sequence interrupts an A-MSDU in progress:
	 * flush the pending one first
	 */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* start a new burst; tail points at the frag_list slot */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* chain follow-up subframes via skb->next */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	/* single frames and the final subframe complete the burst */
	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
808
/* Driver entry point for a received frame: drop it if the owning phy
 * is not running, account testmode RX statistics, then feed it into
 * the A-MSDU burst reassembly stage.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	/* testmode RX counters, including FCS error accounting */
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
830
mt76_has_tx_pending(struct mt76_phy * phy)831 bool mt76_has_tx_pending(struct mt76_phy *phy)
832 {
833 struct mt76_queue *q;
834 int i;
835
836 for (i = 0; i < __MT_TXQ_MAX; i++) {
837 q = phy->q_tx[i];
838 if (q && q->queued)
839 return true;
840 }
841
842 return false;
843 }
844 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
845
846 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)847 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
848 {
849 struct mt76_sband *msband;
850 int idx;
851
852 if (c->band == NL80211_BAND_2GHZ)
853 msband = &phy->sband_2g;
854 else if (c->band == NL80211_BAND_6GHZ)
855 msband = &phy->sband_6g;
856 else
857 msband = &phy->sband_5g;
858
859 idx = c - &msband->sband.channels[0];
860 return &msband->chan[idx];
861 }
862
/* Account the time since the last survey update as channel-active time
 * on the current channel, and restart the measurement interval.
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	ktime_t delta = ktime_sub(time, phy->survey_time);

	phy->chan_state->cc_active += ktime_to_us(delta);
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
872
/* Refresh survey statistics for the current channel: pull hardware
 * counters via the driver callback, account active time, and (for
 * drivers doing software RX airtime accounting) fold the accumulated
 * BSS RX airtime into the channel state.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock protects the cur_cc_bss_rx accumulator */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
894
/* Switch the phy to the channel currently configured in hw->conf:
 * wait (bounded) for pending tx to drain, close out the survey for the
 * old channel, reset DFS state on a real channel/width change and
 * update the channel state pointers.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	/* give pending tx up to 200ms to drain before switching */
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	/* frequency or width change invalidates the cached DFS CAC state */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	/* off-channel visits (e.g. scan) don't change the main channel */
	if (!offchannel)
		phy->main_chan = chandef->chan;

	/* reset survey counters when tuning away from the main channel */
	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
920
/* mac80211 get_survey callback.  @idx enumerates channels across all
 * bands in 2 GHz, 5 GHz, 6 GHz order; returns -ENOENT past the end.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* refresh hardware counters once, on the first channel queried */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	/* map the global index onto the right band's channel array */
	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* counters are kept in microseconds; survey wants milliseconds */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* cc_lock guards the tx/bss_rx counters updated from hot paths */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
985
/* Seed the per-wcid RX packet number (PN) replay counters from the key
 * sequence state.  Replay checking is only enabled for CCMP keys.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame: one PN counter per TID */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame: tid -1 maps to the extra slot at
	 * index IEEE80211_NUM_TIDS (i still holds the loop bound here);
	 * assumes rx_key_pn has IEEE80211_NUM_TIDS + 1 entries —
	 * see mt76_check_ccmp_pn(), which indexes the same slot.
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1014
/* Combine per-chain RSSI readings into one signal value: take the
 * strongest chain and add a small bonus (up to 3 dB) for every extra
 * chain depending on how close it is to the running total.  Chains not
 * set in @chain_mask and non-negative (invalid) readings are ignored;
 * returns -128 when no chain contributes.
 */
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
{
	int signal = -128;
	u8 chains = chain_mask;

	while (chains) {
		int cur = *chain_signal;

		if ((chains & BIT(0)) && cur <= 0) {
			int diff;

			/* keep the stronger value in 'signal' */
			if (cur > signal) {
				int tmp = signal;

				signal = cur;
				cur = tmp;
			}

			diff = signal - cur;
			if (diff == 0)
				signal += 3;
			else if (diff <= 2)
				signal += 2;
			else if (diff <= 6)
				signal += 1;
		}

		chains >>= 1;
		chain_signal++;
	}

	return signal;
}
EXPORT_SYMBOL(mt76_rx_signal);
1043
/* Convert the driver's mt76_rx_status (stored in skb->cb) into the
 * mac80211 ieee80211_rx_status in place, and resolve the destination
 * hw and station for the frame.
 */
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	/* both statuses live in skb->cb, so copy out before overwriting */
	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;
	/* recompute combined signal from the per-chain values */
	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
	if (status->signal <= -128)
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	/* boottime stamp used by userspace for scan result ordering */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.phy_idx);
}
1089
/* CCMP PN replay check for hardware-decrypted frames: compare the
 * received PN against the last accepted PN for the frame's replay
 * counter and flag replays as monitor-only so mac80211 drops them from
 * the data path.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	/* one replay counter per TID by default */
	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* PN must be strictly increasing; equal or lower means replay */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	/* with the IV stripped mac80211 can't re-check; mark validated */
	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1147
1148 static void
mt76_airtime_report(struct mt76_dev * dev,struct mt76_rx_status * status,int len)1149 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
1150 int len)
1151 {
1152 struct mt76_wcid *wcid = status->wcid;
1153 struct ieee80211_rx_status info = {
1154 .enc_flags = status->enc_flags,
1155 .rate_idx = status->rate_idx,
1156 .encoding = status->encoding,
1157 .band = status->band,
1158 .nss = status->nss,
1159 .bw = status->bw,
1160 };
1161 struct ieee80211_sta *sta;
1162 u32 airtime;
1163 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1164
1165 airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
1166 spin_lock(&dev->cc_lock);
1167 dev->cur_cc_bss_rx += airtime;
1168 spin_unlock(&dev->cc_lock);
1169
1170 if (!wcid || !wcid->sta)
1171 return;
1172
1173 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
1174 ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
1175 }
1176
1177 static void
mt76_airtime_flush_ampdu(struct mt76_dev * dev)1178 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
1179 {
1180 struct mt76_wcid *wcid;
1181 int wcid_idx;
1182
1183 if (!dev->rx_ampdu_len)
1184 return;
1185
1186 wcid_idx = dev->rx_ampdu_status.wcid_idx;
1187 if (wcid_idx < ARRAY_SIZE(dev->wcid))
1188 wcid = rcu_dereference(dev->wcid[wcid_idx]);
1189 else
1190 wcid = NULL;
1191 dev->rx_ampdu_status.wcid = wcid;
1192
1193 mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
1194
1195 dev->rx_ampdu_len = 0;
1196 dev->rx_ampdu_ref = 0;
1197 }
1198
/* Software RX airtime accounting entry point, called per received frame.
 * Frames belonging to the same A-MPDU (same ampdu_ref) are accumulated
 * and reported as one unit by mt76_airtime_flush_ampdu(); everything else
 * is reported immediately.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	/* only drivers without hardware airtime reporting need this */
	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		/* 802.3 frames have no 802.11 header to match against */
		if (status->flag & RX_FLAG_8023)
			return;

		/* only count unassociated traffic addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a new A-MPDU (or a non-A-MPDU frame) ends any pending aggregate */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		/* first subframe of a new aggregate: snapshot its status;
		 * wcid_idx 0xff marks "no station"
		 */
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	/* standalone frame: report right away */
	mt76_airtime_report(dev, status, skb->len);
}
1238
/* Per-frame station bookkeeping on the RX path: resolve the wcid for
 * PS-Poll frames, feed airtime accounting, update the RSSI average, and
 * drive power-save state transitions toward mac80211 and the driver.
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-Poll may arrive without a resolved wcid; look the station up
	 * by transmitter address instead
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* signal is in (negative) dBm; the EWMA stores its magnitude */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	/* the remaining checks need an 802.11 header */
	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete (non-fragmented) mgmt/data frames may change the
	 * power-save state
	 */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no transition: current flag already matches the PM bit */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* set the flag before notifying the driver when entering PS, and
	 * clear it only after the driver callback when leaving PS
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1305
/* Deliver a batch of frames (already decrypt-checked and reorder-complete)
 * to mac80211.  A-MSDU subframes are chained on frag_list and are passed
 * up individually.  With a NAPI context the frames go through GRO,
 * otherwise straight into the network stack.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		mt76_check_ccmp_pn(skb);
		/* detach A-MSDU subframes; each is delivered on its own */
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	/* no NAPI context: bypass GRO and deliver the list directly */
	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1345
mt76_rx_poll_complete(struct mt76_dev * dev,enum mt76_rxq_id q,struct napi_struct * napi)1346 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1347 struct napi_struct *napi)
1348 {
1349 struct sk_buff_head frames;
1350 struct sk_buff *skb;
1351
1352 __skb_queue_head_init(&frames);
1353
1354 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1355 mt76_check_sta(dev, skb);
1356 if (mtk_wed_device_active(&dev->mmio.wed))
1357 __skb_queue_tail(&frames, skb);
1358 else
1359 mt76_rx_aggr_reorder(skb, &frames);
1360 }
1361
1362 mt76_rx_complete(dev, &frames, napi);
1363 }
1364 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1365
1366 static int
mt76_sta_add(struct mt76_phy * phy,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1367 mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
1368 struct ieee80211_sta *sta)
1369 {
1370 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1371 struct mt76_dev *dev = phy->dev;
1372 int ret;
1373 int i;
1374
1375 mutex_lock(&dev->mutex);
1376
1377 ret = dev->drv->sta_add(dev, vif, sta);
1378 if (ret)
1379 goto out;
1380
1381 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1382 struct mt76_txq *mtxq;
1383
1384 if (!sta->txq[i])
1385 continue;
1386
1387 mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1388 mtxq->wcid = wcid->idx;
1389 }
1390
1391 ewma_signal_init(&wcid->rssi);
1392 if (phy->band_idx == MT_BAND1)
1393 mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1394 wcid->phy_idx = phy->band_idx;
1395 rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1396
1397 mt76_packet_id_init(wcid);
1398 out:
1399 mutex_unlock(&dev->mutex);
1400
1401 return ret;
1402 }
1403
__mt76_sta_remove(struct mt76_dev * dev,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1404 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1405 struct ieee80211_sta *sta)
1406 {
1407 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1408 int i, idx = wcid->idx;
1409
1410 for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1411 mt76_rx_aggr_stop(dev, wcid, i);
1412
1413 if (dev->drv->sta_remove)
1414 dev->drv->sta_remove(dev, vif, sta);
1415
1416 mt76_packet_id_flush(dev, wcid);
1417
1418 mt76_wcid_mask_clear(dev->wcid_mask, idx);
1419 mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1420 }
1421 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1422
1423 static void
mt76_sta_remove(struct mt76_dev * dev,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1424 mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1425 struct ieee80211_sta *sta)
1426 {
1427 mutex_lock(&dev->mutex);
1428 __mt76_sta_remove(dev, vif, sta);
1429 mutex_unlock(&dev->mutex);
1430 }
1431
mt76_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)1432 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1433 struct ieee80211_sta *sta,
1434 enum ieee80211_sta_state old_state,
1435 enum ieee80211_sta_state new_state)
1436 {
1437 struct mt76_phy *phy = hw->priv;
1438 struct mt76_dev *dev = phy->dev;
1439
1440 if (old_state == IEEE80211_STA_NOTEXIST &&
1441 new_state == IEEE80211_STA_NONE)
1442 return mt76_sta_add(phy, vif, sta);
1443
1444 if (old_state == IEEE80211_STA_AUTH &&
1445 new_state == IEEE80211_STA_ASSOC &&
1446 dev->drv->sta_assoc)
1447 dev->drv->sta_assoc(dev, vif, sta);
1448
1449 if (old_state == IEEE80211_STA_NONE &&
1450 new_state == IEEE80211_STA_NOTEXIST)
1451 mt76_sta_remove(dev, vif, sta);
1452
1453 return 0;
1454 }
1455 EXPORT_SYMBOL_GPL(mt76_sta_state);
1456
mt76_sta_pre_rcu_remove(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1457 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1458 struct ieee80211_sta *sta)
1459 {
1460 struct mt76_phy *phy = hw->priv;
1461 struct mt76_dev *dev = phy->dev;
1462 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1463
1464 mutex_lock(&dev->mutex);
1465 spin_lock_bh(&dev->status_lock);
1466 rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1467 spin_unlock_bh(&dev->status_lock);
1468 mutex_unlock(&dev->mutex);
1469 }
1470 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1471
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,int * dbm)1472 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1473 int *dbm)
1474 {
1475 struct mt76_phy *phy = hw->priv;
1476 int n_chains = hweight8(phy->antenna_mask);
1477 int delta = mt76_tx_power_nss_delta(n_chains);
1478
1479 *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1480
1481 return 0;
1482 }
1483 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1484
mt76_init_sar_power(struct ieee80211_hw * hw,const struct cfg80211_sar_specs * sar)1485 int mt76_init_sar_power(struct ieee80211_hw *hw,
1486 const struct cfg80211_sar_specs *sar)
1487 {
1488 struct mt76_phy *phy = hw->priv;
1489 const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
1490 int i;
1491
1492 if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
1493 return -EINVAL;
1494
1495 for (i = 0; i < sar->num_sub_specs; i++) {
1496 u32 index = sar->sub_specs[i].freq_range_index;
1497 /* SAR specifies power limitaton in 0.25dbm */
1498 s32 power = sar->sub_specs[i].power >> 1;
1499
1500 if (power > 127 || power < -127)
1501 power = 127;
1502
1503 phy->frp[index].range = &capa->freq_ranges[index];
1504 phy->frp[index].power = power;
1505 }
1506
1507 return 0;
1508 }
1509 EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1510
mt76_get_sar_power(struct mt76_phy * phy,struct ieee80211_channel * chan,int power)1511 int mt76_get_sar_power(struct mt76_phy *phy,
1512 struct ieee80211_channel *chan,
1513 int power)
1514 {
1515 const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1516 int freq, i;
1517
1518 if (!capa || !phy->frp)
1519 return power;
1520
1521 if (power > 127 || power < -127)
1522 power = 127;
1523
1524 freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1525 for (i = 0 ; i < capa->num_freq_ranges; i++) {
1526 if (phy->frp[i].range &&
1527 freq >= phy->frp[i].range->start_freq &&
1528 freq < phy->frp[i].range->end_freq) {
1529 power = min_t(int, phy->frp[i].power, power);
1530 break;
1531 }
1532 }
1533
1534 return power;
1535 }
1536 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1537
1538 static void
__mt76_csa_finish(void * priv,u8 * mac,struct ieee80211_vif * vif)1539 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1540 {
1541 if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1542 ieee80211_csa_finish(vif);
1543 }
1544
mt76_csa_finish(struct mt76_dev * dev)1545 void mt76_csa_finish(struct mt76_dev *dev)
1546 {
1547 if (!dev->csa_complete)
1548 return;
1549
1550 ieee80211_iterate_active_interfaces_atomic(dev->hw,
1551 IEEE80211_IFACE_ITER_RESUME_ALL,
1552 __mt76_csa_finish, dev);
1553
1554 dev->csa_complete = 0;
1555 }
1556 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1557
1558 static void
__mt76_csa_check(void * priv,u8 * mac,struct ieee80211_vif * vif)1559 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1560 {
1561 struct mt76_dev *dev = priv;
1562
1563 if (!vif->bss_conf.csa_active)
1564 return;
1565
1566 dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1567 }
1568
mt76_csa_check(struct mt76_dev * dev)1569 void mt76_csa_check(struct mt76_dev *dev)
1570 {
1571 ieee80211_iterate_active_interfaces_atomic(dev->hw,
1572 IEEE80211_IFACE_ITER_RESUME_ALL,
1573 __mt76_csa_check, dev);
1574 }
1575 EXPORT_SYMBOL_GPL(mt76_csa_check);
1576
1577 int
mt76_set_tim(struct ieee80211_hw * hw,struct ieee80211_sta * sta,bool set)1578 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1579 {
1580 return 0;
1581 }
1582 EXPORT_SYMBOL_GPL(mt76_set_tim);
1583
/* Re-insert the 8-byte CCMP header that the hardware stripped on RX,
 * rebuilding it from the packet number saved in status->iv, and clear
 * RX_FLAG_IV_STRIPPED so mac80211 parses the IV from the frame again.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* make room in front and slide the 802.11 header back down */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0 PN1 rsvd (ExtIV|keyid) PN2 PN3 PN4 PN5;
	 * status->iv holds the PN most-significant byte first, so pn[5]
	 * maps to PN0
	 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);	/* ExtIV bit set, key index in b6-b7 */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1606
/* Translate a hardware rate index into the index of the matching entry
 * in the given band's bitrate table; returns 0 when no entry matches.
 * CCK rates only exist on 2.4 GHz; on 2.4 GHz, OFDM lookups skip the
 * four leading CCK entries.
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	bool is_2g = sband == &dev->phy.sband_2g.sband;
	int i, start = 0;

	if (cck) {
		if (!is_2g)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (is_2g) {
		start = 4;
	}

	for (i = start; i < sband->n_bitrates; i++)
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1630
mt76_sw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif,const u8 * mac)1631 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1632 const u8 *mac)
1633 {
1634 struct mt76_phy *phy = hw->priv;
1635
1636 set_bit(MT76_SCANNING, &phy->state);
1637 }
1638 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1639
mt76_sw_scan_complete(struct ieee80211_hw * hw,struct ieee80211_vif * vif)1640 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1641 {
1642 struct mt76_phy *phy = hw->priv;
1643
1644 clear_bit(MT76_SCANNING, &phy->state);
1645 }
1646 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1647
/* mac80211 get_antenna callback: TX and RX report the same antenna mask */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = *rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1661
1662 struct mt76_queue *
mt76_init_queue(struct mt76_dev * dev,int qid,int idx,int n_desc,int ring_base,u32 flags)1663 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1664 int ring_base, u32 flags)
1665 {
1666 struct mt76_queue *hwq;
1667 int err;
1668
1669 hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1670 if (!hwq)
1671 return ERR_PTR(-ENOMEM);
1672
1673 hwq->flags = flags;
1674
1675 err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1676 if (err < 0)
1677 return ERR_PTR(err);
1678
1679 return hwq;
1680 }
1681 EXPORT_SYMBOL_GPL(mt76_init_queue);
1682
mt76_calculate_default_rate(struct mt76_phy * phy,int rateidx)1683 u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
1684 {
1685 int offset = 0;
1686
1687 if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
1688 offset = 4;
1689
1690 /* pick the lowest rate for hidden nodes */
1691 if (rateidx < 0)
1692 rateidx = 0;
1693
1694 rateidx += offset;
1695 if (rateidx >= ARRAY_SIZE(mt76_rates))
1696 rateidx = offset;
1697
1698 return mt76_rates[rateidx].hw_value;
1699 }
1700 EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
1701
/* Accumulate one station's TX statistics into the ethtool data array.
 * The emission order (modes, then bandwidths, then MCS counters) defines
 * the ethtool stat layout and must match the corresponding string table.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	static const u8 base_modes[] = {
		MT_PHY_TYPE_CCK, MT_PHY_TYPE_OFDM, MT_PHY_TYPE_HT,
		MT_PHY_TYPE_HT_GF, MT_PHY_TYPE_VHT, MT_PHY_TYPE_HE_SU,
		MT_PHY_TYPE_HE_EXT_SU, MT_PHY_TYPE_HE_TB, MT_PHY_TYPE_HE_MU,
	};
	static const u8 eht_modes[] = {
		MT_PHY_TYPE_EHT_SU, MT_PHY_TYPE_EHT_TRIG, MT_PHY_TYPE_EHT_MU,
	};
	u64 *data = wi->data;
	int idx = wi->initial_stat_idx;
	int i, n_bw, n_mcs;

	wi->sta_count++;

	for (i = 0; i < ARRAY_SIZE(base_modes); i++)
		data[idx++] += stats->tx_mode[base_modes[i]];
	if (eht) {
		for (i = 0; i < ARRAY_SIZE(eht_modes); i++)
			data[idx++] += stats->tx_mode[eht_modes[i]];
	}

	/* without EHT the widest bandwidth bucket is not reported */
	n_bw = ARRAY_SIZE(stats->tx_bw) - !eht;
	for (i = 0; i < n_bw; i++)
		data[idx++] += stats->tx_bw[i];

	/* EHT adds MCS 12/13 */
	n_mcs = eht ? 14 : 12;
	for (i = 0; i < n_mcs; i++)
		data[idx++] += stats->tx_mcs[i];

	wi->worker_stat_count = idx - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
1734
/* Report aggregated page-pool statistics for all RX queues via ethtool.
 * Compiles to a no-op when CONFIG_PAGE_POOL_STATS is disabled.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats pp_stats = {};
	int i;

	/* sum the counters across every RX queue's page pool */
	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &pp_stats);

	page_pool_ethtool_stats_get(data, &pp_stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
1749
mt76_phy_dfs_state(struct mt76_phy * phy)1750 enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
1751 {
1752 struct ieee80211_hw *hw = phy->hw;
1753 struct mt76_dev *dev = phy->dev;
1754
1755 if (dev->region == NL80211_DFS_UNSET ||
1756 test_bit(MT76_SCANNING, &phy->state))
1757 return MT_DFS_STATE_DISABLED;
1758
1759 if (!hw->conf.radar_enabled) {
1760 if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
1761 (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
1762 return MT_DFS_STATE_ACTIVE;
1763
1764 return MT_DFS_STATE_DISABLED;
1765 }
1766
1767 if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
1768 return MT_DFS_STATE_CAC;
1769
1770 return MT_DFS_STATE_ACTIVE;
1771 }
1772 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
1773