1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include "mt7601u.h"
16#include "trace.h"
17
/* Software TX queue indices; the first four mirror the mac80211 AC order. */
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,	/* voice */
	MT_TXQ_VI = IEEE80211_AC_VI,	/* video */
	MT_TXQ_BE = IEEE80211_AC_BE,	/* best effort */
	MT_TXQ_BK = IEEE80211_AC_BK,	/* background */
	MT_TXQ_PSD,			/* power-save delivery */
	MT_TXQ_MCU,			/* firmware/MCU commands */
	__MT_TXQ_MAX
};
27
28
29static u8 q2hwq(u8 q)
30{
31 return q ^ 0x3;
32}
33
34
35static u8 skb2q(struct sk_buff *skb)
36{
37 int qid = skb_get_queue_mapping(skb);
38
39 if (WARN_ON(qid >= MT_TXQ_PSD)) {
40 qid = MT_TXQ_BE;
41 skb_set_queue_mapping(skb, qid);
42 }
43
44 return q2hwq(qid);
45}
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
/* Encode the requested rate and the is_probe flag into a TXWI packet id.
 *
 * Because PKT_ID 0 disables status reporting only 15 values are usable,
 * but 16 are needed (8 MCS * 2 for encoding is_probe) - so two cases must
 * share an id: MCS0 and MCS7 with is_probe both map to PKT_ID 9, and
 * mt7601u_tx_pktid_dec() disambiguates them via the effective rate.
 */
static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
{
	u8 encoded = (rate + 1) + is_probe * 8;

	if (is_probe && rate == 7)
		return encoded - 7;

	return encoded;
}
77
/* Recover the requested rate, probe flag and retry count from a TX status
 * report whose pktid was produced by mt7601u_tx_pktid_enc().
 */
static void
mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
{
	u8 req_rate = stat->pktid;
	u8 eff_rate = stat->rate & 0x7;

	/* pktid stores rate + 1 (+8 for probe frames). */
	req_rate -= 1;

	if (req_rate > 7) {
		stat->is_probe = true;
		req_rate -= 8;

		/* Un-share PKT_ID 9: MCS0 and MCS7 probes both encode to it;
		 * a non-zero effective rate means the request was MCS7.
		 */
		if (!req_rate && eff_rate)
			req_rate = 7;
	}

	stat->retry = req_rate - eff_rate;
}
97
98static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
99 struct ieee80211_tx_info *info)
100{
101 int pkt_len = (unsigned long)info->status.status_driver_data[0];
102
103 skb_pull(skb, sizeof(struct mt76_txwi) + 4);
104 if (ieee80211_get_hdrlen_from_skb(skb) % 4)
105 mt76_remove_hdr_pad(skb);
106
107 skb_trim(skb, pkt_len);
108}
109
110void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
111{
112 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
113
114 mt7601u_tx_skb_remove_dma_overhead(skb, info);
115
116 ieee80211_tx_info_clear_status(info);
117 info->status.rates[0].idx = -1;
118 info->flags |= IEEE80211_TX_STAT_ACK;
119
120 spin_lock(&dev->mac_lock);
121 ieee80211_tx_status(dev->hw, skb);
122 spin_unlock(&dev->mac_lock);
123}
124
125static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
126{
127 int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
128 u32 need_head;
129
130 need_head = sizeof(struct mt76_txwi) + 4;
131 if (hdr_len % 4)
132 need_head += 2;
133
134 return skb_cow(skb, need_head);
135}
136
/* Build and prepend the hardware TX descriptor (TXWI) to @skb.
 *
 * Fills in rate control, ACK policy, A-MPDU parameters, the WCID index and
 * the length/packet-id word.  @pkt_len is the frame length before any DMA
 * overhead was added.  Returns a pointer to the TXWI now at the skb head.
 */
static struct mt76_txwi *
mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
		  struct ieee80211_sta *sta, struct mt76_wcid *wcid,
		  int pkt_len)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct mt76_txwi *txwi;
	unsigned long flags;
	bool is_probe;
	u32 pkt_id;
	u16 rate_ctl;
	u8 nss;

	txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
	memset(txwi, 0, sizeof(*txwi));

	/* No fixed rate configured for this WCID - ask mac80211's rate
	 * control for one.
	 */
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	/* dev->lock guards wcid->tx_rate against concurrent updates. */
	spin_lock_irqsave(&dev->lock, flags);
	if (rate->idx < 0 || !rate->count)
		rate_ctl = wcid->tx_rate;
	else
		rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
	spin_unlock_irqrestore(&dev->lock, flags);
	txwi->rate_ctl = cpu_to_le16(rate_ctl);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;

	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		/* Scale the BA window by the peer's A-MPDU factor, capped
		 * at 63; probe frames get a zero window.
		 */
		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi->flags =
			cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
				    FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
					       sta->ht_cap.ampdu_density));
		/* Probe frames must not be aggregated. */
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			txwi->flags = 0;
	}

	txwi->wcid = wcid->idx;

	/* Encode the requested rate into the packet id so the TX status
	 * handler (mt7601u_tx_pktid_dec()) can reconstruct the retry count.
	 */
	is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
	pkt_len |= FIELD_PREP(MT_TXWI_LEN_PKTID, pkt_id);
	txwi->len_ctl = cpu_to_le16(pkt_len);

	return txwi;
}
197
/* mac80211 .tx callback: prepend a TXWI to the frame and hand it to the
 * DMA layer on the hardware queue matching its mac80211 queue mapping.
 */
void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt7601u_dev *dev = hw->priv;
	struct ieee80211_vif *vif = info->control.vif;
	struct ieee80211_sta *sta = control->sta;
	struct mt76_sta *msta = NULL;
	struct mt76_wcid *wcid = dev->mon_wcid;
	struct mt76_txwi *txwi;
	int pkt_len = skb->len;
	int hw_q = skb2q(skb);

	/* Record the pre-overhead length so the status path can trim the
	 * skb back (see mt7601u_tx_skb_remove_dma_overhead()); this must
	 * happen before any padding/TXWI is added below.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
	info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;

	/* Ensure headroom for the TXWI and 4-byte-align the 802.11 header. */
	if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
		ieee80211_free_txskb(dev->hw, skb);
		return;
	}

	/* Choose the WCID: per-station if known, the vif's group WCID
	 * otherwise, falling back to the monitor WCID.
	 */
	if (sta) {
		msta = (struct mt76_sta *) sta->drv_priv;
		wcid = &msta->wcid;
	} else if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		wcid = &mvif->group_wcid;
	}

	txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);

	if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
		return;

	trace_mt_tx(dev, skb, msta, txwi);
}
235
/* Delayed work: drain the hardware TX status reports and forward them to
 * mac80211, then decide whether to reschedule itself.
 */
void mt7601u_tx_stat(struct work_struct *work)
{
	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
					       stat_work.work);
	struct mt76_tx_status stat;
	unsigned long flags;
	int cleaned = 0;

	/* Stop draining if the device goes away mid-loop. */
	while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
		stat = mt7601u_mac_fetch_tx_status(dev);
		if (!stat.valid)
			break;

		mt7601u_tx_pktid_dec(dev, &stat);
		mt76_send_tx_status(dev, &stat);

		cleaned++;
	}
	trace_mt_tx_status_cleaned(dev, cleaned);

	/* Re-arm quickly while reports keep arriving, more slowly if more
	 * stats were flagged while we ran, otherwise mark reading done.
	 * tx_lock keeps this decision atomic w.r.t. the producer side.
	 */
	spin_lock_irqsave(&dev->tx_lock, flags);
	if (cleaned)
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));
	else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(20));
	else
		clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
267
/* mac80211 .conf_tx callback: program the per-AC EDCA parameters (AIFS,
 * CWmin/CWmax, TXOP) into both the per-queue EDCA_CFG register and the
 * shared per-field WMM registers.
 */
int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    u16 queue, const struct ieee80211_tx_queue_params *params)
{
	struct mt7601u_dev *dev = hw->priv;
	u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
	u32 val;

	/* The hardware stores contention windows as exponents
	 * (CW = 2^n - 1), hence fls() on the window values supplied by
	 * mac80211.  Defaults of 5/10 apply when a field is left at zero.
	 */
	if (params->cw_min)
		cw_min = fls(params->cw_min);
	if (params->cw_max)
		cw_max = fls(params->cw_max);

	/* The register fields are narrow; warn rather than silently
	 * truncate if the requested values don't fit.
	 */
	WARN_ON(params->txop > 0xff);
	WARN_ON(params->aifs > 0xf);
	WARN_ON(cw_min > 0xf);
	WARN_ON(cw_max > 0xf);

	val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
	/* NOTE(review): hw queue 0 is given a fixed 0x60 TXOP instead of
	 * the requested one - meaning of the magic value is unconfirmed;
	 * verify against the vendor driver before changing.
	 */
	if (!hw_q)
		val |= 0x60;
	else
		val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
	mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);

	/* Mirror the same parameters into the shared WMM registers, each of
	 * which packs the value for all queues via a per-queue shift.
	 */
	val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
	val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_TXOP(hw_q), val);

	val = mt76_rr(dev, MT_WMM_AIFSN);
	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
	val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_AIFSN, val);

	val = mt76_rr(dev, MT_WMM_CWMIN);
	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
	val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_CWMIN, val);

	val = mt76_rr(dev, MT_WMM_CWMAX);
	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
	val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_CWMAX, val);

	return 0;
}
324