// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

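/* Read the 802.11 header length out of a raw frame buffer. Returns 0 when
 * the buffer cannot hold even the shortest header (10 bytes: frame control,
 * duration and one address) or when the derived length overruns the buffer.
 */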
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}

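/* Build an skb from a single RX segment. With paged RX only a small skb is
 * allocated and the bulk of the frame is attached as a page fragment,
 * otherwise the whole frame is copied into the skb.
 */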
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			void *data, u32 seg_len, u32 truesize, struct page *p)
{
	struct sk_buff *skb;
	u32 true_len, hdr_len = 0, copy, frag;

	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
	if (!true_len || true_len > seg_len)
		goto bad_frame;

	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
	if (!hdr_len)
		goto bad_frame;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		/* Hardware inserted 2 bytes of padding after the 802.11
		 * header - copy the header out and skip over the pad.
		 */
		skb_put_data(skb, data, hdr_len);

		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	/* If not doing paged RX allocated skb will always have enough space */
	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	skb_put_data(skb, data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;

bad_frame:
	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
			    true_len, hdr_len);
	dev_kfree_skb(skb);
	return NULL;
}

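/* Process one RX DMA segment: strip the DMA header, the RXWI and the
 * trailing FCE info, build an skb and pass it up to mac80211.
 */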
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some of
	 * the information, we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

	spin_lock(&dev->mac_lock);
	ieee80211_rx(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
}

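/* Return the total length (DMA headers included) of the next segment in the
 * buffer, or 0 when no valid segment is left. DMA lengths must be non-zero,
 * 4-byte aligned and must fit within the remaining data.
 */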
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON_ONCE(!dma_len) ||
	    WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON_ONCE(dma_len & 0x3))
		return 0;

	return MT_DMA_HDRS + dma_len;
}

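/* Walk all segments aggregated into a completed RX buffer. When the buffer
 * holds more than 512 bytes a fresh page is allocated up front so the frame
 * payloads can be attached to skbs as fragments of the old page instead of
 * being copied.
 */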
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy if there is very little data in the buffer. */
	if (data_len > 512)
		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

	if (new_p) {
		/* we have one extra ref from the allocator */
		__free_pages(e->p, MT_RX_ORDER);

		e->p = new_p;
	}
}

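/* Pop the oldest completed RX entry off the queue; NULL if none is pending. */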
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}

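/* RX URB completion handler, called in atomic context. Only accounts the
 * completed entry and defers the actual processing to the RX tasklet.
 */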
static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

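/* Process and rearm completed RX buffers. Entries whose URB finished with an
 * error status are skipped and not resubmitted.
 */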
static void mt7601u_rx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}

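/* TX URB completion handler. Hands the skb over for status reporting and
 * wakes the mac80211 queue when enough entries have drained.
 */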
static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	trace_mt_tx_dma_done(dev, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

	/* The queue was stopped when it filled up; wake it back up once an
	 * eighth of the entries have drained.
	 */
	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

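/* Report TX status for completed skbs and, unless one is already pending,
 * schedule the delayed work that reads TX statistics from the hardware.
 */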
static void mt7601u_tx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt7601u_tx_status(dev, skb);
	}
}

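/* Submit an skb on the bulk OUT endpoint @ep and stop the corresponding
 * mac80211 queue once this endpoint's URB queue is full.
 */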
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	e->skb = skb;
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it
		 * will often be the first ENODEV we see after the device
		 * is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}

/* Map a mac80211 queue number to a USB bulk OUT endpoint */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map a USB endpoint number to the queue selector of the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}

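/* Wrap the skb in the DMA packet descriptor and submit it on the endpoint
 * matching the hardware queue. The WIV flag is only set for frames that do
 * not use a hardware key (hw_key_idx == 0xff).
 */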
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}

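/* Poison all RX URBs so that in-flight transfers complete and no new data
 * is accepted from the device.
 */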
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	for (i = 0; i < dev->rx_q.entries; i++) {
		int next = dev->rx_q.end;

		/* usb_poison_urb() waits for the completion handler, which
		 * takes rx_lock itself, so the lock has to be dropped around
		 * the call. Each poisoned URB completes with an error and
		 * advances rx_q.end to the next entry.
		 */
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		usb_poison_urb(dev->rx_q.e[next].urb);
		spin_lock_irqsave(&dev->rx_lock, flags);
	}

	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

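/* (Re)arm a single RX entry: point its URB at the entry's page and submit. */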
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		/* mt7601u_alloc_rx() may have failed part-way, so the page
		 * pointer can still be NULL here; usb_free_urb() already
		 * handles a NULL urb itself.
		 */
		if (dev->rx_q.e[i].p)
			__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	WARN_ON(q->used);

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		usb_free_urb(q->e[i].urb);
	}
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	if (!dev->tx_q)
		return;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}

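/* Allocate all TX/RX resources and start reception by arming the RX URBs. */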
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret = -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}

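/* Stop reception, kill both tasklets and release all TX/RX resources. */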
void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);
}