1
2
3
4#include <linux/etherdevice.h>
5#include <linux/timekeeping.h>
6#include "mt7915.h"
7#include "../dma.h"
8#include "mac.h"
9
/* Convert a packed RCPI field of an rx-vector dword to signal strength;
 * presumably dBm via (rcpi - 220) / 2 — matches the RCPI use in
 * mt7915_mac_fill_rx() below.
 */
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)

/* Radiotap HE helper: a "known"/format constant as little-endian u16 */
#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
/* Radiotap HE helper: extract field MT_CRXV_HE_<m> from rx-vector dword v
 * and re-encode it at radiotap position IEEE80211_RADIOTAP_HE_<f>
 */
#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
					  IEEE80211_RADIOTAP_HE_##f)
15
/* DFS radar-detection tuning for the ETSI regulatory domain.
 * Array indices select a hardware radar-pattern slot; field order follows
 * struct mt7915_dfs_radar_spec. Values are vendor calibration data —
 * per-field semantics are not visible here.
 */
static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
		[6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
		[7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
		[8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
		[9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};
29
/* DFS radar-detection tuning for the FCC regulatory domain (see the note
 * on etsi_radar_specs for the table layout).
 */
static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
		[1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
		[2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
		[3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
		[4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
	},
};
40
/* DFS radar-detection tuning for the Japanese (MKK) regulatory domain;
 * shares entries 0-4 with the FCC table and adds JP-specific patterns
 * at slots 13-15.
 */
static const struct mt7915_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
		[1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
		[2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
		[3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
		[4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
		[13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
		[14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
		[15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
	},
};
54
55static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
56 u16 idx, bool unicast)
57{
58 struct mt7915_sta *sta;
59 struct mt76_wcid *wcid;
60
61 if (idx >= ARRAY_SIZE(dev->mt76.wcid))
62 return NULL;
63
64 wcid = rcu_dereference(dev->mt76.wcid[idx]);
65 if (unicast || !wcid)
66 return wcid;
67
68 if (!wcid->sta)
69 return NULL;
70
71 sta = container_of(wcid, struct mt7915_sta, wcid);
72 if (!sta->vif)
73 return NULL;
74
75 return &sta->vif->sta.wcid;
76}
77
/* mt76 power-save state callback. Intentionally empty: nothing is done in
 * the driver on a PS transition (presumably handled by firmware/hardware —
 * confirm against the mt76 core's expectations for this hook).
 */
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
81
/* Kick a WTBL update for entry @idx with the action bits in @mask, then
 * poll for the hardware to clear MT_WTBL_UPDATE_BUSY (timeout argument
 * 5000, in mt76_poll's native units).
 * Returns true when the update completed before the timeout.
 */
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}
90
/* Select the WTBL group that contains @wcid (groups of 128 entries, hence
 * wcid >> 7) via the WDUCR register, and return the register offset of
 * that entry within the LMAC WTBL window.
 * NOTE(review): the WDUCR write and subsequent reads through the returned
 * offset are not atomic — presumably callers serialize access; confirm.
 */
static u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, 0);
}
98
99
/* Drain dev->sta_poll_list and, for each queued station, read the per-AC
 * tx/rx airtime counters from its LMAC WTBL entry, then report the deltas
 * to mac80211 for airtime fairness accounting.
 */
static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
	/* TID used when reporting airtime for each access category */
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7915_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	/* grab the whole pending list at once to keep lock hold time short */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr;
		u16 idx;

		/* pop one station; re-take the lock per iteration since
		 * other contexts may still add entries to the public list
		 */
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7915_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;
		/* airtime counters start at dword 20 of the WTBL entry */
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			/* [0..3] = tx per AC, [4..7] = rx per AC */
			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* request a hardware counter reset before wrap */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7915_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			/* counters are stored in LMAC queue order */
			u8 q = mt7915_lmac_mapping(dev, i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}

	rcu_read_unlock();
}
181
182static void
183mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
184 struct ieee80211_radiotap_he *he,
185 __le32 *rxv)
186{
187 u32 ru_h, ru_l;
188 u8 ru, offs = 0;
189
190 ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
191 ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
192 ru = (u8)(ru_l | ru_h << 4);
193
194 status->bw = RATE_INFO_BW_HE_RU;
195
196 switch (ru) {
197 case 0 ... 36:
198 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
199 offs = ru;
200 break;
201 case 37 ... 52:
202 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
203 offs = ru - 37;
204 break;
205 case 53 ... 60:
206 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
207 offs = ru - 53;
208 break;
209 case 61 ... 64:
210 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
211 offs = ru - 61;
212 break;
213 case 65 ... 66:
214 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
215 offs = ru - 65;
216 break;
217 case 67:
218 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
219 break;
220 case 68:
221 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
222 break;
223 }
224
225 he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
226 he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
227 le16_encode_bits(offs,
228 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
229}
230
/* Push an ieee80211_radiotap_he header onto @skb and populate it from the
 * C-RXV words for the given HE PHY format (@phy). Common fields are filled
 * unconditionally; format-specific fields (UL/DL, spatial reuse, RU
 * allocation) are added per HE variant.
 */
static void
mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
			      struct mt76_rx_status *status,
			      __le32 *rxv, u32 phy)
{
	/* "known" bits valid for every HE format */
	static const struct ieee80211_radiotap_he known = {
		.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
			 HE_BITS(DATA1_DATA_DCM_KNOWN) |
			 HE_BITS(DATA1_STBC_KNOWN) |
			 HE_BITS(DATA1_CODING_KNOWN) |
			 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
			 HE_BITS(DATA1_DOPPLER_KNOWN) |
			 HE_BITS(DATA1_BSS_COLOR_KNOWN),
		.data2 = HE_BITS(DATA2_GI_KNOWN) |
			 HE_BITS(DATA2_TXBF_KNOWN) |
			 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
			 HE_BITS(DATA2_TXOP_KNOWN),
	};
	struct ieee80211_radiotap_he *he = NULL;
	/* hardware encodes LTF size minus one */
	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;

	he = skb_push(skb, sizeof(known));
	memcpy(he, &known, sizeof(known));

	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
		    le16_encode_bits(ltf_size,
				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);

	switch (phy) {
	case MT_PHY_TYPE_HE_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
		break;
	case MT_PHY_TYPE_HE_EXT_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		break;
	case MT_PHY_TYPE_HE_MU:
		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);

		/* MU frames additionally carry an RU allocation */
		mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	case MT_PHY_TYPE_HE_TB:
		he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);

		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);

		mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	default:
		break;
	}
}
309
/* Parse the hardware RX descriptor (and any optional RXD groups / rx
 * vector) at the head of @skb, fill in the mt76_rx_status stored in
 * skb->cb, and strip the descriptor so the skb starts at the 802.11 (or
 * hw-translated 802.3) payload.
 * Returns 0 on success, -EINVAL when the frame must be dropped.
 */
static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7915_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 mode = 0;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info;
	bool hdr_trans;
	u16 seq_ctrl = 0;
	u8 qos_ctl = 0;
	__le16 fc = 0;
	int i, idx;

	memset(status, 0, sizeof(*status));

	/* frames from the second band belong to the ext phy, if present */
	if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) {
		mphy = dev->mt76.phy2;
		if (!mphy)
			return -EINVAL;

		phy = mphy->priv;
		status->ext_phy = true;
	}

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);

	/* queue the station for airtime polling (mt7915_mac_sta_poll) */
	if (status->wcid) {
		struct mt7915_sta *msta;

		msta = container_of(status->wcid, struct mt7915_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	/* hardware checksum offload: both IP and TCP/UDP must have passed */
	if ((rxd0 & csum_mask) == csum_mask)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* hardware decrypted the frame and stripped IV/MIC */
	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* skip the fixed 6-dword part of the descriptor; the optional
	 * groups that follow are bounds-checked against skb->len
	 */
	rxd += 6;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		/* group 4 carries the original 802.11 header fields when
		 * the hardware performed header translation
		 */
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		/* group 1: security info; recover the PN/IV so mac80211 can
		 * still do replay detection on IV-stripped frames
		 */
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				/* IV is stored byte-reversed */
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		/* group 2: timestamp; also used to assign one ampdu_ref per
		 * A-MPDU (all subframes share the same hw timestamp)
		 */
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* new A-MPDU: bump the reference, skipping 0 */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		/* group 3: rx vector (PRXV) with RSSI and rate info */
		u32 v0, v1, v2;

		rxv = rxd;
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v0 = le32_to_cpu(rxv[0]);
		v1 = le32_to_cpu(rxv[1]);
		v2 = le32_to_cpu(rxv[2]);

		if (v0 & MT_PRXV_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
		status->signal = status->chain_signal[0];

		/* overall signal = strongest active chain */
		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
			if (!(status->chains & BIT(i)))
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}

		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			/* group 5: C-RXV with full PHY rate information */
			u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
			u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
			bool cck = false;

			rxd += 18;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;

			idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
			mode = FIELD_GET(MT_CRXV_TX_MODE, v2);

			switch (mode) {
			case MT_PHY_TYPE_CCK:
				cck = true;
				fallthrough;
			case MT_PHY_TYPE_OFDM:
				i = mt76_get_rate(&dev->mt76, sband, i, cck);
				break;
			case MT_PHY_TYPE_HT_GF:
			case MT_PHY_TYPE_HT:
				status->encoding = RX_ENC_HT;
				if (i > 31)
					return -EINVAL;
				break;
			case MT_PHY_TYPE_VHT:
				status->nss =
					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
				status->encoding = RX_ENC_VHT;
				if (i > 9)
					return -EINVAL;
				break;
			case MT_PHY_TYPE_HE_MU:
				status->flag |= RX_FLAG_RADIOTAP_HE_MU;
				fallthrough;
			case MT_PHY_TYPE_HE_SU:
			case MT_PHY_TYPE_HE_EXT_SU:
			case MT_PHY_TYPE_HE_TB:
				status->nss =
					FIELD_GET(MT_PRXV_NSTS, v0) + 1;
				status->encoding = RX_ENC_HE;
				status->flag |= RX_FLAG_RADIOTAP_HE;
				/* low nibble is the HE MCS index */
				i &= GENMASK(3, 0);

				if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
					status->he_gi = gi;

				status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
				break;
			default:
				return -EINVAL;
			}
			status->rate_idx = i;

			switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
			case IEEE80211_STA_RX_BW_20:
				break;
			case IEEE80211_STA_RX_BW_40:
				/* NOTE(review): 'mode & MT_PHY_TYPE_HE_EXT_SU'
				 * is a bitwise test on an enum value and looks
				 * like it was meant to be an equality check —
				 * confirm against the C-RXV spec/upstream
				 */
				if (mode & MT_PHY_TYPE_HE_EXT_SU &&
				    (idx & MT_PRXV_TX_ER_SU_106T)) {
					status->bw = RATE_INFO_BW_HE_RU;
					status->he_ru =
						NL80211_RATE_INFO_HE_RU_ALLOC_106;
				} else {
					status->bw = RATE_INFO_BW_40;
				}
				break;
			case IEEE80211_STA_RX_BW_80:
				status->bw = RATE_INFO_BW_80;
				break;
			case IEEE80211_STA_RX_BW_160:
				status->bw = RATE_INFO_BW_160;
				break;
			default:
				return -EINVAL;
			}

			status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
			/* short GI flag only applies to pre-HE rates */
			if (mode < MT_PHY_TYPE_HE_SU && gi)
				status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		}
	}

	/* drop the descriptor and the hardware-inserted header padding */
	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
		if (!hdr_trans) {
			/* strip the 2-byte A-MSDU alignment pad between the
			 * 802.11 header and the payload
			 */
			memmove(skb->data + 2, skb->data,
				ieee80211_get_hdrlen_from_skb(skb));
			skb_pull(skb, 2);
		}
	}

	if (insert_ccmp_hdr && !hdr_trans) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	if (!hdr_trans) {
		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		/* translated frames have no 802.11 header for radiotap */
		status->flag &= ~(RX_FLAG_RADIOTAP_HE |
				  RX_FLAG_RADIOTAP_HE_MU);
		status->flag |= RX_FLAG_8023;
	}

	if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
		mt7915_mac_decode_he_radiotap(skb, status, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}
626
/* Testmode-only: record per-chain RCPI / in-band RSSI / wide-band RSSI,
 * frequency offset and SNR from a standalone RX vector report into the
 * owning phy's test state, then free the skb. Without
 * CONFIG_NL80211_TESTMODE the report is simply discarded.
 */
static void
mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt7915_phy *phy = &dev->phy;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv_hdr = rxd + 2;
	__le32 *rxv = rxd + 4;
	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
	bool ext_phy;
	s32 foe;
	u8 snr;
	int i;

	ext_phy = FIELD_GET(MT_RXV_HDR_BAND_IDX, le32_to_cpu(rxv_hdr[1]));
	if (ext_phy)
		phy = mt7915_ext_phy(dev);

	rcpi = le32_to_cpu(rxv[6]);
	ib_rssi = le32_to_cpu(rxv[7]);
	wb_rssi = le32_to_cpu(rxv[8]) >> 5;

	/* one byte per chain; WB RSSI for chain 3 lives in the next dword */
	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
		if (i == 3)
			wb_rssi = le32_to_cpu(rxv[9]);

		phy->test.last_rcpi[i] = rcpi & 0xff;
		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
	}

	v20 = le32_to_cpu(rxv[20]);
	v21 = le32_to_cpu(rxv[21]);

	/* frequency offset estimate is split across two dwords */
	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);

	/* SNR is stored with a +16 bias */
	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;

	phy->test.last_freq_offset = foe;
	phy->test.last_snr = snr;
#endif

	dev_kfree_skb(skb);
}
672
/* Testmode-only: override the already-written TXWI of the testmode tx skb
 * with a fixed rate/bandwidth/GI configuration taken from the testmode
 * parameters. No-op for any other skb or without CONFIG_NL80211_TESTMODE.
 */
static void
mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
			 struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data *td = &phy->mt76->test;
	const struct ieee80211_rate *r;
	u8 bw, mode, nss = td->tx_rate_nss;
	u8 rate_idx = td->tx_rate_idx;
	u16 rateval = 0;
	u32 val;
	bool cck = false;
	int band;

	if (skb != phy->mt76->test.tx_skb)
		return;

	/* map the testmode tx mode to the hardware PHY type and rate idx */
	switch (td->tx_rate_mode) {
	case MT76_TM_TX_MODE_HT:
		/* HT MCS encodes NSS in the upper bits of the index */
		nss = 1 + (rate_idx >> 3);
		mode = MT_PHY_TYPE_HT;
		break;
	case MT76_TM_TX_MODE_VHT:
		mode = MT_PHY_TYPE_VHT;
		break;
	case MT76_TM_TX_MODE_HE_SU:
		mode = MT_PHY_TYPE_HE_SU;
		break;
	case MT76_TM_TX_MODE_HE_EXT_SU:
		mode = MT_PHY_TYPE_HE_EXT_SU;
		break;
	case MT76_TM_TX_MODE_HE_TB:
		mode = MT_PHY_TYPE_HE_TB;
		break;
	case MT76_TM_TX_MODE_HE_MU:
		mode = MT_PHY_TYPE_HE_MU;
		break;
	case MT76_TM_TX_MODE_CCK:
		cck = true;
		fallthrough;
	case MT76_TM_TX_MODE_OFDM:
		band = phy->mt76->chandef.chan->band;
		/* on 2 GHz the OFDM bitrates start after the 4 CCK ones */
		if (band == NL80211_BAND_2GHZ && !cck)
			rate_idx += 4;

		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
		/* hw_value packs the PHY mode in the high byte and the
		 * hardware rate index in the low byte
		 */
		val = cck ? r->hw_value_short : r->hw_value;

		mode = val >> 8;
		rate_idx = val & 0xff;
		break;
	default:
		mode = MT_PHY_TYPE_OFDM;
		break;
	}

	switch (phy->mt76->chandef.width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		break;
	case NL80211_CHAN_WIDTH_80:
		bw = 2;
		break;
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
		bw = 3;
		break;
	default:
		bw = 0;
		break;
	}

	/* STBC requires a second spatial stream */
	if (td->tx_rate_stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);

	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

	/* single transmission attempt; disable BA for legacy rates */
	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);

	val = MT_TXD6_FIXED_BW |
	      FIELD_PREP(MT_TXD6_BW, bw) |
	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);

	/* HE rates additionally carry the configured LTF in TXD6 */
	if (mode >= MT_PHY_TYPE_HE_SU)
		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);

	/* LDPC is mandatory for HE above 20 MHz */
	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
		val |= MT_TXD6_LDPC;

	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
	txwi[6] |= cpu_to_le32(val);
	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
					  phy->test.spe_idx));
#endif
}
787
788static void
789mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
790 struct sk_buff *skb, struct mt76_wcid *wcid)
791{
792
793 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
794 u8 fc_type, fc_stype;
795 bool wmm = false;
796 u32 val;
797
798 if (wcid->sta) {
799 struct ieee80211_sta *sta;
800
801 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
802 wmm = sta->wme;
803 }
804
805 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
806 FIELD_PREP(MT_TXD1_TID, tid);
807
808 if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
809 val |= MT_TXD1_ETH_802_3;
810
811 txwi[1] |= cpu_to_le32(val);
812
813 fc_type = IEEE80211_FTYPE_DATA >> 2;
814 fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
815
816 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
817 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
818
819 txwi[2] |= cpu_to_le32(val);
820
821 val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
822 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
823 txwi[7] |= cpu_to_le32(val);
824}
825
/* Fill the TXD fields specific to a native 802.11 frame: header length,
 * TID (including ADDBA/BAR special cases), frame type/subtype, BIP
 * handling for protected group mgmt frames, fixed-rate selection and
 * injected-frame sequence numbers.
 */
static void
mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

		/* tell the hardware a BA session is being established and
		 * use the TID carried in the ADDBA request
		 */
		txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
		tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
		u16 control = le16_to_cpu(bar->control);

		tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
	}

	/* header length is stored in units of 2 bytes */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);
	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);

	/* protected group management frames use BIP, not pairwise crypto */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD2_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	/* non-data and group frames go out at a fixed basic rate */
	if (!ieee80211_is_data(fc) || multicast)
		val |= MT_TXD2_FIX_RATE;

	txwi[2] |= cpu_to_le32(val);

	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		/* injected frames keep their caller-supplied sequence
		 * number (BARs carry it in start_seq_num instead)
		 */
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] |= cpu_to_le32(val);
}
901
/* Build the complete 8-dword TXWI (tx descriptor) for @skb: queue/format
 * selection, wcid/omac, protection and ack policy, packet-id for tx
 * status, then the 802.3- or 802.11-specific fields, a default fixed rate
 * when requested, and finally any testmode overrides.
 */
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
			   struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 tx_count = 15;
	u32 val;

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	/* pick the hardware queue: beacons and PSD frames have dedicated
	 * queues, data maps through the per-vif WMM set
	 */
	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
			mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	/* frames on the second band's special queues carry the TGID bit */
	if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
		val |= MT_TXD1_TGID;

	txwi[1] = cpu_to_le32(val);

	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	/* packet id; request tx status reporting for tracked pids */
	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	txwi[6] = 0;
	txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;

	if (is_8023)
		mt7915_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7915_mac_write_txwi_80211(dev, txwi, skb, key);

	/* apply the per-band default basic rate when fixed-rate was set */
	if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
		u16 rate;

		txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			rate = MT7915_5G_RATE_DEFAULT;
		else
			rate = MT7915_2G_RATE_DEFAULT;

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_TX_RATE, rate);
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}

	if (mt76_testmode_enabled(mphy))
		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}
996
/* mt76 tx-path hook: write the TXWI and the cut-through TXP (buffer
 * descriptor) for @tx_info, register the skb for tx-status tracking, and
 * hand ownership of the skb to the token table until tx-free reclaims it.
 * Returns 0 on success or a negative errno.
 */
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_txwi_cache *t;
	struct mt7915_txp *txp;
	int id, i, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;
	int pid;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
			      false);

	/* the TXP follows the TXWI; buf[0] is the descriptor itself, the
	 * payload fragments start at buf[1]
	 */
	txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		txp->bss_idx = mvif->idx;
	}

	/* stash the skb in the txwi cache and hand it to the token table;
	 * tx-free events reference it by token id
	 */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	txp->token = cpu_to_le16(id);
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
	/* skb now owned by the token table, not the queue entry */
	tx_info->skb = DMA_DUMMY_DATA;

	/* the hardware only parses the TXD + 802.11 header from buf[1] */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}
1067
1068static void
1069mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
1070{
1071 struct mt7915_sta *msta;
1072 u16 fc, tid;
1073 u32 val;
1074
1075 if (!sta || !sta->ht_cap.ht_supported)
1076 return;
1077
1078 tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
1079 if (tid >= 6)
1080 return;
1081
1082 val = le32_to_cpu(txwi[2]);
1083 fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
1084 FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
1085 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
1086 return;
1087
1088 msta = (struct mt7915_sta *)sta->drv_priv;
1089 if (!test_and_set_bit(tid, &msta->ampdu_state))
1090 ieee80211_start_tx_ba_session(sta, tid, 0);
1091}
1092
1093static void
1094mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
1095{
1096 struct mt7915_txp *txp;
1097 int i;
1098
1099 txp = mt7915_txwi_to_txp(dev, t);
1100 for (i = 0; i < txp->nbuf; i++)
1101 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
1102 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
1103}
1104
/* Release one completed tx entry: unmap its DMA buffers, complete the skb
 * towards mac80211 (queueing it on @free_list), optionally trigger BA
 * session setup, and return the txwi cache entry to the free pool.
 */
static void
mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt7915_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		/* don't aggregate EAPOL frames */
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7915_tx_check_aggr(sta, txwi);
	} else {
		/* no station context: recover the wcid from the TXD */
		wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
1135
1136static void
1137mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
1138{
1139 struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data;
1140 struct mt76_dev *mdev = &dev->mt76;
1141 struct mt76_phy *mphy_ext = mdev->phy2;
1142 struct mt76_txwi_cache *txwi;
1143 struct ieee80211_sta *sta = NULL;
1144 LIST_HEAD(free_list);
1145 struct sk_buff *tmp;
1146 u8 i, count;
1147 bool wake = false;
1148
1149
1150 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
1151 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
1152 if (mphy_ext) {
1153 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
1154 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
1155 }
1156
1157
1158
1159
1160
1161
1162 count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
1163 for (i = 0; i < count; i++) {
1164 u32 msdu, info = le32_to_cpu(free->info[i]);
1165 u8 stat;
1166
1167
1168
1169
1170
1171 if (info & MT_TX_FREE_PAIR) {
1172 struct mt7915_sta *msta;
1173 struct mt7915_phy *phy;
1174 struct mt76_wcid *wcid;
1175 u16 idx;
1176
1177 count++;
1178 idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
1179 wcid = rcu_dereference(dev->mt76.wcid[idx]);
1180 sta = wcid_to_sta(wcid);
1181 if (!sta)
1182 continue;
1183
1184 msta = container_of(wcid, struct mt7915_sta, wcid);
1185 phy = msta->vif->phy;
1186 spin_lock_bh(&dev->sta_poll_lock);
1187 if (list_empty(&msta->stats_list))
1188 list_add_tail(&msta->stats_list, &phy->stats_list);
1189 if (list_empty(&msta->poll_list))
1190 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
1191 spin_unlock_bh(&dev->sta_poll_lock);
1192 continue;
1193 }
1194
1195 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
1196 stat = FIELD_GET(MT_TX_FREE_STATUS, info);
1197
1198 txwi = mt76_token_release(mdev, msdu, &wake);
1199 if (!txwi)
1200 continue;
1201
1202 mt7915_txwi_free(dev, txwi, sta, &free_list);
1203 }
1204
1205 mt7915_mac_sta_poll(dev);
1206
1207 if (wake)
1208 mt76_set_tx_blocked(&dev->mt76, false);
1209
1210 mt76_worker_schedule(&dev->mt76.tx_worker);
1211
1212 napi_consume_skb(skb, 1);
1213
1214 list_for_each_entry_safe(skb, tmp, &free_list, list) {
1215 skb_list_del_init(skb);
1216 napi_consume_skb(skb, 1);
1217 }
1218}
1219
1220static bool
1221mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
1222 __le32 *txs_data)
1223{
1224 struct mt76_dev *mdev = &dev->mt76;
1225 struct ieee80211_tx_info *info;
1226 struct sk_buff_head list;
1227 struct sk_buff *skb;
1228
1229 mt76_tx_status_lock(mdev, &list);
1230 skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
1231 if (!skb)
1232 goto out;
1233
1234 info = IEEE80211_SKB_CB(skb);
1235 if (!(txs_data[0] & le32_to_cpu(MT_TXS0_ACK_ERROR_MASK)))
1236 info->flags |= IEEE80211_TX_STAT_ACK;
1237
1238 info->status.ampdu_len = 1;
1239 info->status.ampdu_ack_len = !!(info->flags &
1240 IEEE80211_TX_STAT_ACK);
1241
1242 info->status.rates[0].idx = -1;
1243 mt76_tx_status_skb_done(mdev, skb, &list);
1244
1245out:
1246 mt76_tx_status_unlock(mdev, &list);
1247
1248 return !!skb;
1249}
1250
1251static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
1252{
1253 struct mt7915_sta *msta = NULL;
1254 struct mt76_wcid *wcid;
1255 __le32 *txs_data = data;
1256 u16 wcidx;
1257 u32 txs;
1258 u8 pid;
1259
1260 txs = le32_to_cpu(txs_data[0]);
1261 if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
1262 return;
1263
1264 txs = le32_to_cpu(txs_data[2]);
1265 wcidx = FIELD_GET(MT_TXS2_WCID, txs);
1266
1267 txs = le32_to_cpu(txs_data[3]);
1268 pid = FIELD_GET(MT_TXS3_PID, txs);
1269
1270 if (pid < MT_PACKET_ID_FIRST)
1271 return;
1272
1273 if (wcidx >= MT7915_WTBL_SIZE)
1274 return;
1275
1276 rcu_read_lock();
1277
1278 wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1279 if (!wcid)
1280 goto out;
1281
1282 mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data);
1283
1284 if (!wcid->sta)
1285 goto out;
1286
1287 msta = container_of(wcid, struct mt7915_sta, wcid);
1288 spin_lock_bh(&dev->sta_poll_lock);
1289 if (list_empty(&msta->poll_list))
1290 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
1291 spin_unlock_bh(&dev->sta_poll_lock);
1292
1293out:
1294 rcu_read_unlock();
1295}
1296
1297void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1298 struct sk_buff *skb)
1299{
1300 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1301 __le32 *rxd = (__le32 *)skb->data;
1302 __le32 *end = (__le32 *)&skb->data[skb->len];
1303 enum rx_pkt_type type;
1304
1305 type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
1306
1307 switch (type) {
1308 case PKT_TYPE_TXRX_NOTIFY:
1309 mt7915_mac_tx_free(dev, skb);
1310 break;
1311 case PKT_TYPE_RX_EVENT:
1312 mt7915_mcu_rx_event(dev, skb);
1313 break;
1314 case PKT_TYPE_TXRXV:
1315 mt7915_mac_fill_rx_vector(dev, skb);
1316 break;
1317 case PKT_TYPE_TXS:
1318 for (rxd += 2; rxd + 8 <= end; rxd += 8)
1319 mt7915_mac_add_txs(dev, rxd);
1320 dev_kfree_skb(skb);
1321 break;
1322 case PKT_TYPE_NORMAL:
1323 if (!mt7915_mac_fill_rx(dev, skb)) {
1324 mt76_rx(&dev->mt76, q, skb);
1325 return;
1326 }
1327 fallthrough;
1328 default:
1329 dev_kfree_skb(skb);
1330 break;
1331 }
1332}
1333
1334void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
1335{
1336 struct mt7915_dev *dev;
1337
1338 if (!e->txwi) {
1339 dev_kfree_skb_any(e->skb);
1340 return;
1341 }
1342
1343 dev = container_of(mdev, struct mt7915_dev, mt76);
1344
1345
1346 if (e->skb == DMA_DUMMY_DATA) {
1347 struct mt76_txwi_cache *t;
1348 struct mt7915_txp *txp;
1349
1350 txp = mt7915_txwi_to_txp(mdev, e->txwi);
1351 t = mt76_token_put(mdev, le16_to_cpu(txp->token));
1352 e->skb = t ? t->skb : NULL;
1353 }
1354
1355 if (e->skb)
1356 mt76_tx_complete_skb(mdev, e->wcid, e->skb);
1357}
1358
/* Reset the per-phy CCA statistics counters. */
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);

	/* Clear the statistics-counter enable bit, then set BIT(11)|BIT(9).
	 * The two raw bits are undocumented RX_CTRL1 fields — presumably
	 * counter restart/enable controls; TODO confirm against vendor
	 * register documentation.
	 */
	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}
1368
1369void mt7915_mac_reset_counters(struct mt7915_phy *phy)
1370{
1371 struct mt7915_dev *dev = phy->dev;
1372 bool ext_phy = phy != &dev->phy;
1373 int i;
1374
1375 for (i = 0; i < 4; i++) {
1376 mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
1377 mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
1378 }
1379
1380 if (ext_phy) {
1381 dev->mt76.phy2->survey_time = ktime_get_boottime();
1382 i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
1383 } else {
1384 dev->mt76.phy.survey_time = ktime_get_boottime();
1385 i = 0;
1386 }
1387 memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
1388
1389
1390 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
1391 MT_WF_RMAC_MIB_RXTIME_CLR);
1392
1393 mt7915_mcu_get_chan_mib_info(phy, true);
1394}
1395
/* Program MAC timing parameters (SIFS/slot/CCA/PLCP timeouts and the
 * CF-End rate) for one phy, taking the coverage class into account.
 */
void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 val, reg_offset;
	/* base CCK/OFDM PLCP and CCA timeout values */
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* SIFS: 16 us on 5 GHz, 10 us on 2.4 GHz */
	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	/* Both phys share the air: use the larger of the two coverage
	 * classes so timing also works for the more distant stations.
	 */
	if (ext_phy) {
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       coverage_class);
	} else {
		struct mt7915_phy *phy_ext = mt7915_ext_phy(dev);

		if (phy_ext)
			coverage_class = max_t(s16, phy_ext->coverage_class,
					       coverage_class);
	}
	/* pause TX/RX while the timing registers are rewritten */
	mt76_set(dev, MT_ARB_SCR(ext_phy),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	/* 3 extra timing units per coverage-class step */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* short slot (or 5 GHz): default CF-End rate, else the 11b rate */
	if (phy->slottime < 20 || is_5ghz)
		val = MT7915_CFEND_RATE_DEFAULT;
	else
		val = MT7915_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(ext_phy),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
1452
/* Enable IPI (idle power indicator) sampling used for noise-floor
 * estimation; the collected histograms are read by mt7915_phy_get_nf().
 */
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
	mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy),
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);

	/* 0x5 selects the IPI collection mode — NOTE(review): meaning of
	 * this value is not documented here; confirm against vendor docs.
	 */
	mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy),
		 FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}
1462
1463static u8
1464mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
1465{
1466 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1467 struct mt7915_dev *dev = phy->dev;
1468 u32 val, sum = 0, n = 0;
1469 int nss, i;
1470
1471 for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
1472 u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support));
1473
1474 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1475 val = mt76_rr(dev, reg);
1476 sum += val * nf_power[i];
1477 n += val;
1478 }
1479 }
1480
1481 if (!n)
1482 return 0;
1483
1484 return sum / n;
1485}
1486
1487void mt7915_update_channel(struct mt76_phy *mphy)
1488{
1489 struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
1490 struct mt76_channel_state *state = mphy->chan_state;
1491 bool ext_phy = phy != &phy->dev->phy;
1492 int nf;
1493
1494 mt7915_mcu_get_chan_mib_info(phy, false);
1495
1496 nf = mt7915_phy_get_nf(phy, ext_phy);
1497 if (!phy->noise)
1498 phy->noise = nf << 4;
1499 else if (nf)
1500 phy->noise += nf - (phy->noise >> 4);
1501
1502 state->noise = -(phy->noise >> 4);
1503}
1504
1505static bool
1506mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
1507{
1508 bool ret;
1509
1510 ret = wait_event_timeout(dev->reset_wait,
1511 (READ_ONCE(dev->reset_state) & state),
1512 MT7915_RESET_TIMEOUT);
1513
1514 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1515 return ret;
1516}
1517
1518static void
1519mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1520{
1521 struct ieee80211_hw *hw = priv;
1522
1523 switch (vif->type) {
1524 case NL80211_IFTYPE_MESH_POINT:
1525 case NL80211_IFTYPE_ADHOC:
1526 case NL80211_IFTYPE_AP:
1527 mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
1528 break;
1529 default:
1530 break;
1531 }
1532}
1533
1534static void
1535mt7915_update_beacons(struct mt7915_dev *dev)
1536{
1537 ieee80211_iterate_active_interfaces(dev->mt76.hw,
1538 IEEE80211_IFACE_ITER_RESUME_ALL,
1539 mt7915_update_vif_beacon, dev->mt76.hw);
1540
1541 if (!dev->mt76.phy2)
1542 return;
1543
1544 ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
1545 IEEE80211_IFACE_ITER_RESUME_ALL,
1546 mt7915_update_vif_beacon, dev->mt76.phy2->hw);
1547}
1548
/* Stop both WFDMA engines, flush every TX/RX queue and restart DMA.
 * Called from the full-chip reset path; ordering of the register writes
 * and queue cleanups below is hardware-mandated.
 */
static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phy2;
	u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
	int i;

	/* disable TX/RX DMA on both engines (and the second HIF, if any) */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
	mt76_clear(dev, MT_WFDMA1_GLO_CFG,
		   MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			    MT_WFDMA0_GLO_CFG_RX_DMA_EN));
		mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
			   (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			    MT_WFDMA1_GLO_CFG_RX_DMA_EN));
	}

	/* give in-flight DMA time to drain */
	usleep_range(1000, 2000);

	/* drop everything still queued: data TX, MCU TX, then RX rings */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (mphy_ext)
			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	/* flush all pending TX status entries */
	mt76_tx_status_check(&dev->mt76, NULL, true);

	/* re-program per-queue prefetch settings lost with the engines */
	mt7915_dma_prefetch(dev);

	/* re-enable DMA on both engines */
	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
	mt76_set(dev, MT_WFDMA1_GLO_CFG,
		 MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN |
		 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			 (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			  MT_WFDMA0_GLO_CFG_RX_DMA_EN));
		mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
			 (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			  MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			  MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			  MT_WFDMA1_GLO_CFG_OMIT_RX_INFO));
	}
}
1605
/* Release every outstanding TX token together with its skb and DMA
 * mappings.  Used on reset, when the hardware will never deliver the
 * corresponding TX-free events.
 */
void mt7915_tx_token_put(struct mt7915_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		/* no sta, no free_list: frames are completed immediately */
		mt7915_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
1619
1620
/* Full-chip reset handler, driven by MCU reset-state events.  The
 * sequence (stop queues -> quiesce NAPI/workers -> DMA reset handshake
 * -> restart) is order-sensitive; do not reorder the steps below.
 */
void mt7915_mac_reset_work(struct work_struct *work)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7915_dev *dev;

	dev = container_of(work, struct mt7915_dev, reset_work);
	ext_phy = dev->mt76.phy2;
	phy2 = ext_phy ? ext_phy->priv : NULL;

	/* only act on a firmware-requested DMA stop */
	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	/* quiesce the driver: flag the reset, stop workers and NAPI */
	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.napi[2]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	/* ack the DMA stop, then wait for the MCU's reset-done signal */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7915_dma_reset(dev);

		/* all in-flight TX tokens are lost across the reset */
		mt7915_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	/* restart RX NAPI and kick the interrupt tasklet */
	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	napi_enable(&dev->mt76.napi[2]);
	napi_schedule(&dev->mt76.napi[2]);
	tasklet_schedule(&dev->irq_tasklet);

	/* final handshake: tell the MCU we are done, wait for normal state */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mutex_unlock(&dev->mt76.mutex);

	/* beacon templates are presumably lost in the reset; re-install */
	mt7915_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work,
					     MT7915_WATCHDOG_TIME);
}
1704
/* Accumulate the per-phy MIB hardware counters into software stats.
 * The "+=" pattern relies on the registers being read-clear (see
 * mt7915_mac_reset_counters(), which reads them to discard old values).
 */
static void
mt7915_mac_update_stats(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	bool ext_phy = phy != &dev->phy;
	int i, aggr0, aggr1;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
					   MT_MIB_SDR3_FCS_ERR_MASK);

	/* Each phy owns half of the shared aggr_stats array; AGG_CNT holds
	 * buckets 0..7 (aggr0) and AGG_CNT2 buckets 8..15 (aggr1), two
	 * 16-bit counters packed per 32-bit register.
	 */
	aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
		u32 val;

		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		mib->ack_fail_cnt +=
			FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);

		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
		mib->rts_retries_cnt +=
			FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);

		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr0++] += val >> 16;

		val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
		dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr1++] += val >> 16;
	}
}
1739
1740static void
1741mt7915_mac_sta_stats_work(struct mt7915_phy *phy)
1742{
1743 struct mt7915_dev *dev = phy->dev;
1744 struct mt7915_sta *msta;
1745 LIST_HEAD(list);
1746
1747 spin_lock_bh(&dev->sta_poll_lock);
1748 list_splice_init(&phy->stats_list, &list);
1749
1750 while (!list_empty(&list)) {
1751 msta = list_first_entry(&list, struct mt7915_sta, stats_list);
1752 list_del_init(&msta->stats_list);
1753 spin_unlock_bh(&dev->sta_poll_lock);
1754
1755
1756 mt7915_mcu_get_tx_rate(dev, RATE_CTRL_RU_INFO, msta->wcid.idx);
1757
1758 spin_lock_bh(&dev->sta_poll_lock);
1759 }
1760
1761 spin_unlock_bh(&dev->sta_poll_lock);
1762}
1763
/* Deferred rate-control updater: push queued per-station rate/SMPS
 * changes to the MCU.  Entries are spliced off dev->sta_rc_list so new
 * updates can be queued concurrently.
 */
void mt7915_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7915_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
		list_del_init(&msta->rc_list);
		/* snapshot and clear the change mask under the lock */
		changed = msta->stats.changed;
		msta->stats.changed = 0;
		spin_unlock_bh(&dev->sta_poll_lock);

		/* recover the mac80211 objects wrapping our private data */
		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED)) {
			mt7915_mcu_add_he(dev, vif, sta);
			mt7915_mcu_add_rate_ctrl(dev, vif, sta);
		}

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7915_mcu_add_smps(dev, vif, sta);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
1801
1802void mt7915_mac_work(struct work_struct *work)
1803{
1804 struct mt7915_phy *phy;
1805 struct mt76_phy *mphy;
1806
1807 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
1808 mac_work.work);
1809 phy = mphy->priv;
1810
1811 mutex_lock(&mphy->dev->mutex);
1812
1813 mt76_update_survey(mphy);
1814 if (++mphy->mac_work_count == 5) {
1815 mphy->mac_work_count = 0;
1816
1817 mt7915_mac_update_stats(phy);
1818 }
1819
1820 if (++phy->sta_work_count == 10) {
1821 phy->sta_work_count = 0;
1822 mt7915_mac_sta_stats_work(phy);
1823 }
1824
1825 mutex_unlock(&mphy->dev->mutex);
1826
1827 mt76_tx_status_check(mphy->dev, NULL, false);
1828
1829 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
1830 MT7915_WATCHDOG_TIME);
1831}
1832
1833static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
1834{
1835 struct mt7915_dev *dev = phy->dev;
1836
1837 if (phy->rdd_state & BIT(0))
1838 mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
1839 if (phy->rdd_state & BIT(1))
1840 mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
1841}
1842
1843static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
1844{
1845 int err;
1846
1847 err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
1848 if (err < 0)
1849 return err;
1850
1851 return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
1852}
1853
1854static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
1855{
1856 struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1857 struct mt7915_dev *dev = phy->dev;
1858 bool ext_phy = phy != &dev->phy;
1859 int err;
1860
1861
1862 err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
1863 if (err < 0)
1864 return err;
1865
1866 err = mt7915_dfs_start_rdd(dev, ext_phy);
1867 if (err < 0)
1868 return err;
1869
1870 phy->rdd_state |= BIT(ext_phy);
1871
1872 if (chandef->width == NL80211_CHAN_WIDTH_160 ||
1873 chandef->width == NL80211_CHAN_WIDTH_80P80) {
1874 err = mt7915_dfs_start_rdd(dev, 1);
1875 if (err < 0)
1876 return err;
1877
1878 phy->rdd_state |= BIT(1);
1879 }
1880
1881 return 0;
1882}
1883
1884static int
1885mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
1886{
1887 const struct mt7915_dfs_radar_spec *radar_specs;
1888 struct mt7915_dev *dev = phy->dev;
1889 int err, i;
1890
1891 switch (dev->mt76.region) {
1892 case NL80211_DFS_FCC:
1893 radar_specs = &fcc_radar_specs;
1894 err = mt7915_mcu_set_fcc5_lpn(dev, 8);
1895 if (err < 0)
1896 return err;
1897 break;
1898 case NL80211_DFS_ETSI:
1899 radar_specs = &etsi_radar_specs;
1900 break;
1901 case NL80211_DFS_JP:
1902 radar_specs = &jp_radar_specs;
1903 break;
1904 default:
1905 return -EINVAL;
1906 }
1907
1908 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
1909 err = mt7915_mcu_set_radar_th(dev, i,
1910 &radar_specs->radar_pattern[i]);
1911 if (err < 0)
1912 return err;
1913 }
1914
1915 return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
1916}
1917
/* Reconcile the radar detector with the current channel's DFS state:
 * start CAC/detection, signal CAC end, or fall back to normal mode.
 * Returns 0 or a negative MCU error code.
 */
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* no DFS region configured: force normal (non-detecting) mode and
	 * stop any chains that are still running
	 */
	if (dev->mt76.region == NL80211_DFS_UNSET) {
		phy->dfs_state = -1;
		if (phy->rdd_state)
			goto stop;

		return 0;
	}

	/* don't reconfigure while scanning off-channel */
	if (test_bit(MT76_SCANNING, &phy->mt76->state))
		return 0;

	/* nothing to do when the channel's DFS state hasn't changed */
	if (phy->dfs_state == chandef->chan->dfs_state)
		return 0;

	err = mt7915_dfs_init_radar_specs(phy);
	if (err < 0) {
		phy->dfs_state = -1;
		goto stop;
	}

	phy->dfs_state = chandef->chan->dfs_state;

	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
		/* channel not yet usable: run CAC with radar detection */
		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
			return mt7915_dfs_start_radar_detector(phy);

		/* CAC already passed: notify the MCU that it is over */
		return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
					  MT_RX_SEL0, 0);
	}

stop:
	/* switch back to normal RX mode, then stop the detector chains */
	err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7915_dfs_stop_radar_detector(phy);
	return 0;
}
1964