#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"

static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, 0444);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");

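/* Usage sketch (hypothetical invocation): the three parameters above are
 * read-only at runtime (mode 0444), so they are given at module load time,
 * e.g. "modprobe wil6210 rx_align_2=1 rx_large_buf=1".
 */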
static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}

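/* Ring index helpers. The ring is empty when swhead == swtail; one slot is
 * always left unused so that a full ring can be distinguished from an empty
 * one (hence the "- 1" in wil_vring_avail_tx()).
 */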
static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

static inline int wil_vring_used_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;

	return (vring->size + swhead - swtail) % vring->size;
}

static inline int wil_vring_avail_tx(struct vring *vring)
{
	return vring->size - wil_vring_used_tx(vring) - 1;
}

static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size / 8;
}

static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size / 4;
}

static inline int wil_vring_avail_low(struct vring *vring)
{
	return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
}

static inline int wil_vring_avail_high(struct vring *vring)
{
	return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}

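/* wil_is_tx_idle - check that all Tx vrings of the device are drained.
 * With NAPI enabled, waits up to WIL_DATA_COMPLETION_TO_MS for outstanding
 * descriptors to complete; returns false on timeout or if a ring stays busy.
 */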
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
	int i;
	unsigned long data_comp_to;

	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *vring = &wil->vring_tx[i];
		int vring_index = vring - wil->vring_tx;
		struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];

		spin_lock(&txdata->lock);

		if (!vring->va || !txdata->enabled) {
			spin_unlock(&txdata->lock);
			continue;
		}

		data_comp_to = jiffies + msecs_to_jiffies(
					WIL_DATA_COMPLETION_TO_MS);
		if (test_bit(wil_status_napi_en, wil->status)) {
			while (!wil_vring_is_empty(vring)) {
				if (time_after(jiffies, data_comp_to)) {
					wil_dbg_pm(wil,
						   "TO waiting for idle tx\n");
					spin_unlock(&txdata->lock);
					return false;
				}
				wil_dbg_ratelimited(wil,
						    "tx vring is not empty -> NAPI\n");
				spin_unlock(&txdata->lock);
				napi_synchronize(&wil->napi_tx);
				msleep(20);
				spin_lock(&txdata->lock);
				if (!vring->va || !txdata->enabled)
					break;
			}
		}

		spin_unlock(&txdata->lock);
	}

	return true;
}
152
153
static inline bool wil_val_in_range(int val, int min, int max)
{
	return val >= min && val < max;
}

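/* wil_vring_alloc - allocate the DMA-coherent descriptor ring and the
 * per-descriptor software context array. The DMA mask is temporarily dropped
 * to 32 bits around the ring allocation, presumably because the hardware
 * needs the ring base address to fit in 32 bits even when data buffers may
 * use the wider wil->dma_addr_size mask.
 */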
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "vring_alloc:\n");

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}
218
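/* wil_txdesc_unmap - undo the DMA mapping of a Tx descriptor, honoring
 * whether the buffer was mapped as a single buffer or as a page fragment.
 */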
static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}
236
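/* wil_vring_free - release a Tx or Rx vring: unmap descriptors still owned
 * by software, free their skbs, then free the DMA ring and context array.
 * Caller must hold wil->mutex.
 */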
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			if (!ctx) {
				wil_dbg_txrx(wil,
					     "ctx(%d) was already completed\n",
					     vring->swtail);
				vring->swtail = wil_vring_next_tail(vring);
				continue;
			}
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else {
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
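/* wil_vring_alloc_skb - allocate an Rx skb of rx_buf_len plus Ethernet
 * header and optional SNAP pad, DMA-map it and attach it to descriptor @i
 * of the Rx vring.
 */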
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.status = 0;
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}
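/* wil_rx_add_radiotap_header - prepend a radiotap header for monitor-mode
 * delivery. When rtap_include_phy_info is set, the raw Rx descriptor and any
 * PHY data are appended in a vendor namespace.
 */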
349static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
350 struct sk_buff *skb)
351{
352 struct wil6210_rtap {
353 struct ieee80211_radiotap_header rthdr;
354
355
356 u8 flags;
357
358 __le16 chnl_freq __aligned(2);
359 __le16 chnl_flags;
360
361 u8 mcs_present;
362 u8 mcs_flags;
363 u8 mcs_index;
364 } __packed;
365 struct wil6210_rtap_vendor {
366 struct wil6210_rtap rtap;
367
368 u8 vendor_oui[3] __aligned(2);
369 u8 vendor_ns;
370 __le16 vendor_skip;
371 u8 vendor_data[0];
372 } __packed;
373 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
374 struct wil6210_rtap_vendor *rtap_vendor;
375 int rtap_len = sizeof(struct wil6210_rtap);
376 int phy_length = 0;
377 static char phy_data[128];
378 struct ieee80211_channel *ch = wil->monitor_chandef.chan;
379
380 if (rtap_include_phy_info) {
381 rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
382
383 if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
384
385
386
387
388
389
390 int len = min_t(int, 8 + sizeof(phy_data),
391 wil_rxdesc_phy_length(d));
392
393 if (len > 8) {
394 void *p = skb_tail_pointer(skb);
395 void *pa = PTR_ALIGN(p, 8);
396
397 if (skb_tailroom(skb) >= len + (pa - p)) {
398 phy_length = len - 8;
399 memcpy(phy_data, pa, phy_length);
400 }
401 }
402 }
403 rtap_len += phy_length;
404 }
405
406 if (skb_headroom(skb) < rtap_len &&
407 pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
408 wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
409 return;
410 }
411
412 rtap_vendor = skb_push(skb, rtap_len);
413 memset(rtap_vendor, 0, rtap_len);
414
415 rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
416 rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
417 rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
418 (1 << IEEE80211_RADIOTAP_FLAGS) |
419 (1 << IEEE80211_RADIOTAP_CHANNEL) |
420 (1 << IEEE80211_RADIOTAP_MCS));
421 if (d->dma.status & RX_DMA_STATUS_ERROR)
422 rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
423
424 rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
425 rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
426
427 rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
428 rtap_vendor->rtap.mcs_flags = 0;
429 rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
430
431 if (rtap_include_phy_info) {
432 rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
433 IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
434
435 rtap_vendor->vendor_oui[0] = 0x04;
436 rtap_vendor->vendor_oui[1] = 0xce;
437 rtap_vendor->vendor_oui[2] = 0x14;
438 rtap_vendor->vendor_ns = 1;
439
440 rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
441 phy_length);
442 memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
443 memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
444 phy_length);
445 }
446}
447
448
449static inline int wil_is_back_req(u8 fc)
450{
451 return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
452 (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
453}
454
455bool wil_is_rx_idle(struct wil6210_priv *wil)
456{
457 struct vring_rx_desc *_d;
458 struct vring *vring = &wil->vring_rx;
459
460 _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
461 if (_d->dma.status & RX_DMA_STATUS_DU)
462 return false;
463
464 return true;
465}
466
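/* wil_vring_reap_rx - reap the next completed Rx descriptor: unmap and trim
 * the skb, add a radiotap header in monitor mode, drop non-data and
 * malformed frames, and return the skb; returns NULL when no frame is ready.
 */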
474static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
475 struct vring *vring)
476{
477 struct device *dev = wil_to_dev(wil);
478 struct wil6210_vif *vif;
479 struct net_device *ndev;
480 volatile struct vring_rx_desc *_d;
481 struct vring_rx_desc *d;
482 struct sk_buff *skb;
483 dma_addr_t pa;
484 unsigned int snaplen = wil_rx_snaplen();
485 unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
486 u16 dmalen;
487 u8 ftype;
488 int cid, mid;
489 int i;
490 struct wil_net_stats *stats;
491
492 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
493
494again:
495 if (unlikely(wil_vring_is_empty(vring)))
496 return NULL;
497
498 i = (int)vring->swhead;
499 _d = &vring->va[i].rx;
500 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
501
502 return NULL;
503 }
504
505 skb = vring->ctx[i].skb;
506 vring->ctx[i].skb = NULL;
507 wil_vring_advance_head(vring, 1);
508 if (!skb) {
509 wil_err(wil, "No Rx skb at [%d]\n", i);
510 goto again;
511 }
512 d = wil_skb_rxdesc(skb);
513 *d = *_d;
514 pa = wil_desc_addr(&d->dma.addr);
515
516 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
517 dmalen = le16_to_cpu(d->dma.length);
518
519 trace_wil6210_rx(i, d);
520 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
521 wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
522 (const void *)d, sizeof(*d), false);
523
524 cid = wil_rxdesc_cid(d);
525 mid = wil_rxdesc_mid(d);
526 vif = wil->vifs[mid];
527
528 if (unlikely(!vif)) {
529 wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
530 mid);
531 kfree_skb(skb);
532 goto again;
533 }
534 ndev = vif_to_ndev(vif);
535 stats = &wil->sta[cid].stats;
536
537 if (unlikely(dmalen > sz)) {
538 wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
539 stats->rx_large_frame++;
540 kfree_skb(skb);
541 goto again;
542 }
543 skb_trim(skb, dmalen);
544
545 prefetch(skb->data);
546
547 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
548 skb->data, skb_headlen(skb), false);
549
550 stats->last_mcs_rx = wil_rxdesc_mcs(d);
551 if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
552 stats->rx_per_mcs[stats->last_mcs_rx]++;
553
554
555 if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
556 wil_rx_add_radiotap_header(wil, skb);
557
558
559 if (ndev->type != ARPHRD_ETHER)
560 return skb;
561
562
563
564
565 ftype = wil_rxdesc_ftype(d) << 2;
566 if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
567 u8 fc1 = wil_rxdesc_fc1(d);
568 int tid = wil_rxdesc_tid(d);
569 u16 seq = wil_rxdesc_seq(d);
570
571 wil_dbg_txrx(wil,
572 "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
573 fc1, mid, cid, tid, seq);
574 stats->rx_non_data_frame++;
575 if (wil_is_back_req(fc1)) {
576 wil_dbg_txrx(wil,
577 "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
578 mid, cid, tid, seq);
579 wil_rx_bar(wil, vif, cid, tid, seq);
580 } else {
581
582
583
584 wil_dbg_txrx(wil,
585 "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
586 fc1, mid, cid, tid, seq);
587 wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
588 (const void *)d, sizeof(*d), false);
589 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
590 skb->data, skb_headlen(skb), false);
591 }
592 kfree_skb(skb);
593 goto again;
594 }
595
596 if (unlikely(skb->len < ETH_HLEN + snaplen)) {
597 wil_err(wil, "Short frame, len = %d\n", skb->len);
598 stats->rx_short_frame++;
599 kfree_skb(skb);
600 goto again;
601 }
602
603
604
605
606
607 if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
608
609 if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
610 skb->ip_summed = CHECKSUM_UNNECESSARY;
611
612
613
614
615
616 }
617
618 if (snaplen) {
619
620
621
622
623
624
625 memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
626 skb_pull(skb, snaplen);
627 }
628
629 return skb;
630}
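/* wil_rx_refill - refill the Rx vring with freshly allocated skbs, up to
 * @count or until the ring is full, then publish the new tail to hardware.
 */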
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil->main_ndev;
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
	     (next_tail != v->swhead) && (count-- > 0);
	     v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
					    rc, v->swtail);
			break;
		}
	}

	wmb();

	wil_w(wil, v->hwtail, v->swtail);

	return rc;
}
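/* reverse_memcmp - compare two byte arrays starting from the last byte.
 * Used below for PN (packet number) comparison, presumably because the PN
 * bytes are stored least-significant-byte first.
 */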
static int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
	     --su1, --su2, count--) {
		res = *su1 - *su2;
		if (res)
			break;
	}
	return res;
}

static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d);
	int tid = wil_rxdesc_tid(d);
	int key_id = wil_rxdesc_key_id(d);
	int mc = wil_rxdesc_mcast(d);
	struct wil_sta_info *s = &wil->sta[cid];
	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
				      &s->tid_crypto_rx[tid];
	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
	const u8 *pn = (u8 *)&d->mac.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}
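/* wil_netif_rx_any - deliver an Rx skb to the network stack via GRO.
 * Performs the PN replay check first; in non-isolated AP mode, frames
 * destined for other associated stations are bridged back to the air.
 */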
729void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
730{
731 gro_result_t rc = GRO_NORMAL;
732 struct wil6210_vif *vif = ndev_to_vif(ndev);
733 struct wil6210_priv *wil = ndev_to_wil(ndev);
734 struct wireless_dev *wdev = vif_to_wdev(vif);
735 unsigned int len = skb->len;
736 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
737 int cid = wil_rxdesc_cid(d);
738 int security = wil_rxdesc_security(d);
739 struct ethhdr *eth = (void *)skb->data;
740
741
742
743 int mcast = is_multicast_ether_addr(eth->h_dest);
744 struct wil_net_stats *stats = &wil->sta[cid].stats;
745 struct sk_buff *xmit_skb = NULL;
746 static const char * const gro_res_str[] = {
747 [GRO_MERGED] = "GRO_MERGED",
748 [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
749 [GRO_HELD] = "GRO_HELD",
750 [GRO_NORMAL] = "GRO_NORMAL",
751 [GRO_DROP] = "GRO_DROP",
752 };
753
754 if (ndev->features & NETIF_F_RXHASH)
755
756
757
758
759
760 skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
761
762 skb_orphan(skb);
763
764 if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
765 rc = GRO_DROP;
766 dev_kfree_skb(skb);
767 stats->rx_replay++;
768 goto stats;
769 }
770
771 if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
772 if (mcast) {
773
774
775
776 xmit_skb = skb_copy(skb, GFP_ATOMIC);
777 } else {
778 int xmit_cid = wil_find_cid(wil, vif->mid,
779 eth->h_dest);
780
781 if (xmit_cid >= 0) {
782
783
784
785
786
787 xmit_skb = skb;
788 skb = NULL;
789 }
790 }
791 }
792 if (xmit_skb) {
793
794
795
796
797 xmit_skb->dev = ndev;
798 xmit_skb->priority += 256;
799 xmit_skb->protocol = htons(ETH_P_802_3);
800 skb_reset_network_header(xmit_skb);
801 skb_reset_mac_header(xmit_skb);
802 wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
803 dev_queue_xmit(xmit_skb);
804 }
805
806 if (skb) {
807 skb->protocol = eth_type_trans(skb, ndev);
808 skb->dev = ndev;
809 rc = napi_gro_receive(&wil->napi_rx, skb);
810 wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
811 len, gro_res_str[rc]);
812 }
813stats:
814
815 if (unlikely(rc == GRO_DROP)) {
816 ndev->stats.rx_dropped++;
817 stats->rx_dropped++;
818 wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
819 } else {
820 ndev->stats.rx_packets++;
821 stats->rx_packets++;
822 ndev->stats.rx_bytes += len;
823 stats->rx_bytes += len;
824 if (mcast)
825 ndev->stats.multicast++;
826 }
827}
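/* wil_rx_handle - process up to *quota completed Rx frames. Monitor-mode
 * frames go straight to the netif; data frames pass through the reorder
 * buffer. The Rx vring is refilled afterwards.
 */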
834void wil_rx_handle(struct wil6210_priv *wil, int *quota)
835{
836 struct net_device *ndev = wil->main_ndev;
837 struct wireless_dev *wdev = ndev->ieee80211_ptr;
838 struct vring *v = &wil->vring_rx;
839 struct sk_buff *skb;
840
841 if (unlikely(!v->va)) {
842 wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
843 return;
844 }
845 wil_dbg_txrx(wil, "rx_handle\n");
846 while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
847 (*quota)--;
848
849
850 if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
851 skb->dev = ndev;
852 skb_reset_mac_header(skb);
853 skb->ip_summed = CHECKSUM_UNNECESSARY;
854 skb->pkt_type = PACKET_OTHERHOST;
855 skb->protocol = htons(ETH_P_802_2);
856 wil_netif_rx_any(skb, ndev);
857 } else {
858 wil_rx_reorder(wil, skb);
859 }
860 }
861 wil_rx_refill(wil, v->size);
862}
863
864static void wil_rx_buf_len_init(struct wil6210_priv *wil)
865{
866 wil->rx_buf_len = rx_large_buf ?
867 WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
868 if (mtu_max > wil->rx_buf_len) {
869
870
871
872
873 wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
874 wil->rx_buf_len = mtu_max;
875 }
876}
877
878int wil_rx_init(struct wil6210_priv *wil, u16 size)
879{
880 struct vring *vring = &wil->vring_rx;
881 int rc;
882
883 wil_dbg_misc(wil, "rx_init\n");
884
885 if (vring->va) {
886 wil_err(wil, "Rx ring already allocated\n");
887 return -EINVAL;
888 }
889
890 wil_rx_buf_len_init(wil);
891
892 vring->size = size;
893 rc = wil_vring_alloc(wil, vring);
894 if (rc)
895 return rc;
896
897 rc = wmi_rx_chain_add(wil, vring);
898 if (rc)
899 goto err_free;
900
901 rc = wil_rx_refill(wil, vring->size);
902 if (rc)
903 goto err_free;
904
905 return 0;
906 err_free:
907 wil_vring_free(wil, vring, 0);
908
909 return rc;
910}
911
912void wil_rx_fini(struct wil6210_priv *wil)
913{
914 struct vring *vring = &wil->vring_rx;
915
916 wil_dbg_misc(wil, "rx_fini\n");
917
918 if (vring->va)
919 wil_vring_free(wil, vring, 0);
920}
921
922static inline void wil_tx_data_init(struct vring_tx_data *txdata)
923{
924 spin_lock_bh(&txdata->lock);
925 txdata->dot1x_open = 0;
926 txdata->enabled = 0;
927 txdata->idle = 0;
928 txdata->last_idle = 0;
929 txdata->begin = 0;
930 txdata->agg_wsize = 0;
931 txdata->agg_timeout = 0;
932 txdata->agg_amsdu = 0;
933 txdata->addba_in_progress = false;
934 txdata->mid = U8_MAX;
935 spin_unlock_bh(&txdata->lock);
936}
937
938int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
939 int cid, int tid)
940{
941 struct wil6210_priv *wil = vif_to_wil(vif);
942 int rc;
943 struct wmi_vring_cfg_cmd cmd = {
944 .action = cpu_to_le32(WMI_VRING_CMD_ADD),
945 .vring_cfg = {
946 .tx_sw_ring = {
947 .max_mpdu_size =
948 cpu_to_le16(wil_mtu2macbuf(mtu_max)),
949 .ring_size = cpu_to_le16(size),
950 },
951 .ringid = id,
952 .cidxtid = mk_cidxtid(cid, tid),
953 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
954 .mac_ctrl = 0,
955 .to_resolution = 0,
956 .agg_max_wsize = 0,
957 .schd_params = {
958 .priority = cpu_to_le16(0),
959 .timeslot_us = cpu_to_le16(0xfff),
960 },
961 },
962 };
963 struct {
964 struct wmi_cmd_hdr wmi;
965 struct wmi_vring_cfg_done_event cmd;
966 } __packed reply = {
967 .cmd = {.status = WMI_FW_STATUS_FAILURE},
968 };
969 struct vring *vring = &wil->vring_tx[id];
970 struct vring_tx_data *txdata = &wil->vring_tx_data[id];
971
972 wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
973 cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
974 lockdep_assert_held(&wil->mutex);
975
976 if (vring->va) {
977 wil_err(wil, "Tx ring [%d] already allocated\n", id);
978 rc = -EINVAL;
979 goto out;
980 }
981
982 wil_tx_data_init(txdata);
983 vring->size = size;
984 rc = wil_vring_alloc(wil, vring);
985 if (rc)
986 goto out;
987
988 wil->vring2cid_tid[id][0] = cid;
989 wil->vring2cid_tid[id][1] = tid;
990
991 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
992
993 if (!vif->privacy)
994 txdata->dot1x_open = true;
995 rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
996 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
997 if (rc)
998 goto out_free;
999
1000 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1001 wil_err(wil, "Tx config failed, status 0x%02x\n",
1002 reply.cmd.status);
1003 rc = -EINVAL;
1004 goto out_free;
1005 }
1006
1007 spin_lock_bh(&txdata->lock);
1008 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1009 txdata->mid = vif->mid;
1010 txdata->enabled = 1;
1011 spin_unlock_bh(&txdata->lock);
1012
1013 if (txdata->dot1x_open && (agg_wsize >= 0))
1014 wil_addba_tx_request(wil, id, agg_wsize);
1015
1016 return 0;
1017 out_free:
1018 spin_lock_bh(&txdata->lock);
1019 txdata->dot1x_open = false;
1020 txdata->enabled = 0;
1021 spin_unlock_bh(&txdata->lock);
1022 wil_vring_free(wil, vring, 1);
1023 wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
1024 wil->vring2cid_tid[id][1] = 0;
1025
1026 out:
1027
1028 return rc;
1029}
1030
1031int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
1032{
1033 struct wil6210_priv *wil = vif_to_wil(vif);
1034 int rc;
1035 struct wmi_bcast_vring_cfg_cmd cmd = {
1036 .action = cpu_to_le32(WMI_VRING_CMD_ADD),
1037 .vring_cfg = {
1038 .tx_sw_ring = {
1039 .max_mpdu_size =
1040 cpu_to_le16(wil_mtu2macbuf(mtu_max)),
1041 .ring_size = cpu_to_le16(size),
1042 },
1043 .ringid = id,
1044 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
1045 },
1046 };
1047 struct {
1048 struct wmi_cmd_hdr wmi;
1049 struct wmi_vring_cfg_done_event cmd;
1050 } __packed reply = {
1051 .cmd = {.status = WMI_FW_STATUS_FAILURE},
1052 };
1053 struct vring *vring = &wil->vring_tx[id];
1054 struct vring_tx_data *txdata = &wil->vring_tx_data[id];
1055
1056 wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
1057 cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
1058 lockdep_assert_held(&wil->mutex);
1059
1060 if (vring->va) {
1061 wil_err(wil, "Tx ring [%d] already allocated\n", id);
1062 rc = -EINVAL;
1063 goto out;
1064 }
1065
1066 wil_tx_data_init(txdata);
1067 vring->size = size;
1068 rc = wil_vring_alloc(wil, vring);
1069 if (rc)
1070 goto out;
1071
1072 wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
1073 wil->vring2cid_tid[id][1] = 0;
1074
1075 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1076
1077 if (!vif->privacy)
1078 txdata->dot1x_open = true;
1079 rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
1080 &cmd, sizeof(cmd),
1081 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
1082 if (rc)
1083 goto out_free;
1084
1085 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1086 wil_err(wil, "Tx config failed, status 0x%02x\n",
1087 reply.cmd.status);
1088 rc = -EINVAL;
1089 goto out_free;
1090 }
1091
1092 spin_lock_bh(&txdata->lock);
1093 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1094 txdata->mid = vif->mid;
1095 txdata->enabled = 1;
1096 spin_unlock_bh(&txdata->lock);
1097
1098 return 0;
1099 out_free:
1100 spin_lock_bh(&txdata->lock);
1101 txdata->enabled = 0;
1102 txdata->dot1x_open = false;
1103 spin_unlock_bh(&txdata->lock);
1104 wil_vring_free(wil, vring, 1);
1105 out:
1106
1107 return rc;
1108}
1109
1110void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
1111{
1112 struct vring *vring = &wil->vring_tx[id];
1113 struct vring_tx_data *txdata = &wil->vring_tx_data[id];
1114
1115 lockdep_assert_held(&wil->mutex);
1116
1117 if (!vring->va)
1118 return;
1119
1120 wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
1121
1122 spin_lock_bh(&txdata->lock);
1123 txdata->dot1x_open = false;
1124 txdata->mid = U8_MAX;
1125 txdata->enabled = 0;
1126 spin_unlock_bh(&txdata->lock);
1127
1128
1129
1130
1131
1132
1133 wmb();
1134
1135 if (test_bit(wil_status_napi_en, wil->status))
1136 napi_synchronize(&wil->napi_tx);
1137
1138 wil_vring_free(wil, vring, 1);
1139}
1140
1141static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
1142 struct wil6210_vif *vif,
1143 struct sk_buff *skb)
1144{
1145 int i;
1146 struct ethhdr *eth = (void *)skb->data;
1147 int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
1148
1149 if (cid < 0)
1150 return NULL;
1151
1152
1153 for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
1154 if (!wil->vring_tx_data[i].dot1x_open &&
1155 (skb->protocol != cpu_to_be16(ETH_P_PAE)))
1156 continue;
1157 if (wil->vring2cid_tid[i][0] == cid) {
1158 struct vring *v = &wil->vring_tx[i];
1159 struct vring_tx_data *txdata = &wil->vring_tx_data[i];
1160
1161 wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
1162 eth->h_dest, i);
1163 if (v->va && txdata->enabled) {
1164 return v;
1165 } else {
1166 wil_dbg_txrx(wil,
1167 "find_tx_ucast: vring[%d] not valid\n",
1168 i);
1169 return NULL;
1170 }
1171 }
1172 }
1173
1174 return NULL;
1175}
1176
1177static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1178 struct vring *vring, struct sk_buff *skb);
1179
1180static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
1181 struct wil6210_vif *vif,
1182 struct sk_buff *skb)
1183{
1184 struct vring *v;
1185 int i;
1186 u8 cid;
1187 struct vring_tx_data *txdata;
1188
1189
1190
1191
1192
1193 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
1194 v = &wil->vring_tx[i];
1195 txdata = &wil->vring_tx_data[i];
1196 if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
1197 continue;
1198
1199 cid = wil->vring2cid_tid[i][0];
1200 if (cid >= WIL6210_MAX_CID)
1201 continue;
1202
1203 if (!wil->vring_tx_data[i].dot1x_open &&
1204 (skb->protocol != cpu_to_be16(ETH_P_PAE)))
1205 continue;
1206
1207 wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
1208
1209 return v;
1210 }
1211
1212 wil_dbg_txrx(wil, "Tx while no vrings active?\n");
1213
1214 return NULL;
1215}
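/* wil_find_tx_bcast_1 - use the interface's dedicated broadcast vring
 * (AP-style operation); returns NULL if it is not configured, disabled, or
 * closed by 802.1x for non-EAPOL frames.
 */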
1228static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
1229 struct wil6210_vif *vif,
1230 struct sk_buff *skb)
1231{
1232 struct vring *v;
1233 struct vring_tx_data *txdata;
1234 int i = vif->bcast_vring;
1235
1236 if (i < 0)
1237 return NULL;
1238 v = &wil->vring_tx[i];
1239 txdata = &wil->vring_tx_data[i];
1240 if (!v->va || !txdata->enabled)
1241 return NULL;
1242 if (!wil->vring_tx_data[i].dot1x_open &&
1243 (skb->protocol != cpu_to_be16(ETH_P_PAE)))
1244 return NULL;
1245
1246 return v;
1247}
1248
1249static void wil_set_da_for_vring(struct wil6210_priv *wil,
1250 struct sk_buff *skb, int vring_index)
1251{
1252 struct ethhdr *eth = (void *)skb->data;
1253 int cid = wil->vring2cid_tid[vring_index][0];
1254
1255 ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
1256}
1257
1258static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
1259 struct wil6210_vif *vif,
1260 struct sk_buff *skb)
1261{
1262 struct vring *v, *v2;
1263 struct sk_buff *skb2;
1264 int i;
1265 u8 cid;
1266 struct ethhdr *eth = (void *)skb->data;
1267 char *src = eth->h_source;
1268 struct vring_tx_data *txdata, *txdata2;
1269
1270
1271 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
1272 v = &wil->vring_tx[i];
1273 txdata = &wil->vring_tx_data[i];
1274 if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
1275 continue;
1276
1277 cid = wil->vring2cid_tid[i][0];
1278 if (cid >= WIL6210_MAX_CID)
1279 continue;
1280 if (!wil->vring_tx_data[i].dot1x_open &&
1281 (skb->protocol != cpu_to_be16(ETH_P_PAE)))
1282 continue;
1283
1284
1285 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
1286 continue;
1287
1288 goto found;
1289 }
1290
1291 wil_dbg_txrx(wil, "Tx while no vrings active?\n");
1292
1293 return NULL;
1294
1295found:
1296 wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
1297 wil_set_da_for_vring(wil, skb, i);
1298
1299
1300 for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
1301 v2 = &wil->vring_tx[i];
1302 txdata2 = &wil->vring_tx_data[i];
1303 if (!v2->va || txdata2->mid != vif->mid)
1304 continue;
1305 cid = wil->vring2cid_tid[i][0];
1306 if (cid >= WIL6210_MAX_CID)
1307 continue;
1308 if (!wil->vring_tx_data[i].dot1x_open &&
1309 (skb->protocol != cpu_to_be16(ETH_P_PAE)))
1310 continue;
1311
1312 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
1313 continue;
1314
1315 skb2 = skb_copy(skb, GFP_ATOMIC);
1316 if (skb2) {
1317 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
1318 wil_set_da_for_vring(wil, skb2, i);
1319 wil_tx_vring(wil, vif, v2, skb2);
1320 } else {
1321 wil_err(wil, "skb_copy failed\n");
1322 }
1323 }
1324
1325 return v;
1326}
1327
1328static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
1329 int vring_index)
1330{
1331 wil_desc_addr_set(&d->dma.addr, pa);
1332 d->dma.ip_length = 0;
1333
1334 d->dma.b11 = 0;
1335 d->dma.error = 0;
1336 d->dma.status = 0;
1337 d->dma.length = cpu_to_le16((u16)len);
1338 d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
1339 d->mac.d[0] = 0;
1340 d->mac.d[1] = 0;
1341 d->mac.d[2] = 0;
1342 d->mac.ucode_cmd = 0;
1343
1344 d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
1345 (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
1346
1347 return 0;
1348}
1349
1350static inline
1351void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
1352{
1353 d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
1354}
1355
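/* wil_tx_desc_offload_setup_tso - fill the checksum/TSO offload fields of a
 * Tx descriptor for one TSO sub-descriptor (header, first, middle or last),
 * based on the IP version and the TCP/network header lengths.
 */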
1363static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
1364 struct sk_buff *skb,
1365 int tso_desc_type, bool is_ipv4,
1366 int tcp_hdr_len, int skb_net_hdr_len)
1367{
1368 d->dma.b11 = ETH_HLEN;
1369 d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
1370
1371 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
1372
1373 d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1374
1375
1376 d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
1377 (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
1378 d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
1379
1380 d->dma.ip_length = skb_net_hdr_len;
1381
1382 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
1383
1384 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
1385}
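/* wil_tx_desc_offload_setup - set checksum-offload bits in the Tx descriptor
 * according to the skb protocol (IPv4/IPv6, TCP/UDP). Returns -EINVAL for
 * protocols the hardware cannot offload, in which case the caller drops the
 * packet.
 */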
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;
1399 int protocol;
1400
1401 if (skb->ip_summed != CHECKSUM_PARTIAL)
1402 return 0;
1403
1404 d->dma.b11 = ETH_HLEN;
1405
1406 switch (skb->protocol) {
1407 case cpu_to_be16(ETH_P_IP):
1408 protocol = ip_hdr(skb)->protocol;
1409 d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
1410 break;
1411 case cpu_to_be16(ETH_P_IPV6):
1412 protocol = ipv6_hdr(skb)->nexthdr;
1413 break;
1414 default:
1415 return -EINVAL;
1416 }
1417
1418 switch (protocol) {
1419 case IPPROTO_TCP:
1420 d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
1421
1422 d->dma.d0 |=
1423 (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1424 break;
1425 case IPPROTO_UDP:
1426
1427 d->dma.d0 |=
1428 (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1429 break;
1430 default:
1431 return -EINVAL;
1432 }
1433
1434 d->dma.ip_length = skb_network_header_len(skb);
1435
1436 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
1437
1438 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
1439
1440 return 0;
1441}
1442
1443static inline void wil_tx_last_desc(struct vring_tx_desc *d)
1444{
1445 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
1446 BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
1447 BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
1448}
1449
1450static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
1451{
1452 d->dma.d0 |= wil_tso_type_lst <<
1453 DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
1454}
1455
1456static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1457 struct vring *vring, struct sk_buff *skb)
1458{
1459 struct device *dev = wil_to_dev(wil);
1460
1461
1462 volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
1463 *_first_desc = NULL;
1464
1465
1466 struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
1467 *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
1468 *first_desc = &first_desc_mem;
1469
1470
1471 struct wil_ctx *hdr_ctx, *first_ctx = NULL;
1472
1473 int descs_used = 0;
1474 int sg_desc_cnt = 0;
1475
1476 u32 swhead = vring->swhead;
1477 int used, avail = wil_vring_avail_tx(vring);
1478 int nr_frags = skb_shinfo(skb)->nr_frags;
1479 int min_desc_required = nr_frags + 1;
1480 int mss = skb_shinfo(skb)->gso_size;
1481 int f, len, hdrlen, headlen;
1482 int vring_index = vring - wil->vring_tx;
1483 struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
1484 uint i = swhead;
1485 dma_addr_t pa;
1486 const skb_frag_t *frag = NULL;
1487 int rem_data = mss;
1488 int lenmss;
1489 int hdr_compensation_need = true;
1490 int desc_tso_type = wil_tso_type_first;
1491 bool is_ipv4;
1492 int tcp_hdr_len;
1493 int skb_net_hdr_len;
1494 int gso_type;
1495 int rc = -EINVAL;
1496
1497 wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
1498 vring_index);
1499
1500 if (unlikely(!txdata->enabled))
1501 return -EINVAL;
1502
1503
1504
1505
1506
1507
1508 if (unlikely(avail < min_desc_required)) {
1509 wil_err_ratelimited(wil,
1510 "TSO: Tx ring[%2d] full. No space for %d fragments\n",
1511 vring_index, min_desc_required);
1512 return -ENOMEM;
1513 }
1514
1515
1516 hdrlen = ETH_HLEN +
1517 (int)skb_network_header_len(skb) +
1518 tcp_hdrlen(skb);
1519
1520 gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
1521 switch (gso_type) {
1522 case SKB_GSO_TCPV4:
1523
1524
1525
1526 ip_hdr(skb)->tot_len = 0;
1527 ip_hdr(skb)->check = 0;
1528 is_ipv4 = true;
1529 break;
1530 case SKB_GSO_TCPV6:
1531
1532 ipv6_hdr(skb)->payload_len = 0;
1533 is_ipv4 = false;
1534 break;
1535 default:
1536
1537
1538
1539 return -EINVAL;
1540 }
1541
1542 if (skb->ip_summed != CHECKSUM_PARTIAL)
1543 return -EINVAL;
1544
1545
1546
1547
1548 tcp_hdr_len = tcp_hdrlen(skb);
1549 skb_net_hdr_len = skb_network_header_len(skb);
1550
1551 _hdr_desc = &vring->va[i].tx;
1552
1553 pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
1554 if (unlikely(dma_mapping_error(dev, pa))) {
1555 wil_err(wil, "TSO: Skb head DMA map error\n");
1556 goto err_exit;
1557 }
1558
1559 wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
1560 wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
1561 tcp_hdr_len, skb_net_hdr_len);
1562 wil_tx_last_desc(hdr_desc);
1563
1564 vring->ctx[i].mapped_as = wil_mapped_as_single;
1565 hdr_ctx = &vring->ctx[i];
1566
1567 descs_used++;
1568 headlen = skb_headlen(skb) - hdrlen;
1569
1570 for (f = headlen ? -1 : 0; f < nr_frags; f++) {
1571 if (headlen) {
1572 len = headlen;
1573 wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
1574 len);
1575 } else {
1576 frag = &skb_shinfo(skb)->frags[f];
1577 len = frag->size;
1578 wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
1579 }
1580
1581 while (len) {
1582 wil_dbg_txrx(wil,
1583 "TSO: len %d, rem_data %d, descs_used %d\n",
1584 len, rem_data, descs_used);
1585
1586 if (descs_used == avail) {
1587 wil_err_ratelimited(wil, "TSO: ring overflow\n");
1588 rc = -ENOMEM;
1589 goto mem_error;
1590 }
1591
1592 lenmss = min_t(int, rem_data, len);
1593 i = (swhead + descs_used) % vring->size;
1594 wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
1595
1596 if (!headlen) {
1597 pa = skb_frag_dma_map(dev, frag,
1598 frag->size - len, lenmss,
1599 DMA_TO_DEVICE);
1600 vring->ctx[i].mapped_as = wil_mapped_as_page;
1601 } else {
1602 pa = dma_map_single(dev,
1603 skb->data +
1604 skb_headlen(skb) - headlen,
1605 lenmss,
1606 DMA_TO_DEVICE);
1607 vring->ctx[i].mapped_as = wil_mapped_as_single;
1608 headlen -= lenmss;
1609 }
1610
1611 if (unlikely(dma_mapping_error(dev, pa))) {
1612 wil_err(wil, "TSO: DMA map page error\n");
1613 goto mem_error;
1614 }
1615
1616 _desc = &vring->va[i].tx;
1617
1618 if (!_first_desc) {
1619 _first_desc = _desc;
1620 first_ctx = &vring->ctx[i];
1621 d = first_desc;
1622 } else {
1623 d = &desc_mem;
1624 }
1625
1626 wil_tx_desc_map(d, pa, lenmss, vring_index);
1627 wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
1628 is_ipv4, tcp_hdr_len,
1629 skb_net_hdr_len);
1630
1631
1632 desc_tso_type = wil_tso_type_mid;
1633
1634 descs_used++;
1635 sg_desc_cnt++;
1636 len -= lenmss;
1637 rem_data -= lenmss;
1638
1639 wil_dbg_txrx(wil,
1640 "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
1641 len, rem_data, descs_used, sg_desc_cnt);
1642
1643
1644 if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
1645 if (hdr_compensation_need) {
1646
1647
1648
1649 hdr_ctx->nr_frags = sg_desc_cnt;
1650 wil_tx_desc_set_nr_frags(first_desc,
1651 sg_desc_cnt +
1652 1);
1653 hdr_compensation_need = false;
1654 } else {
1655 wil_tx_desc_set_nr_frags(first_desc,
1656 sg_desc_cnt);
1657 }
1658 first_ctx->nr_frags = sg_desc_cnt - 1;
1659
1660 wil_tx_last_desc(d);
1661
1662
1663
1664
1665
1666 if (first_desc != d)
1667 *_first_desc = *first_desc;
1668
1669
1670
1671
1672 if (f < nr_frags - 1 || len > 0)
1673 *_desc = *d;
1674
1675 rem_data = mss;
1676 _first_desc = NULL;
1677 sg_desc_cnt = 0;
1678 } else if (first_desc != d)
1679 *_desc = *d;
1680 }
1681 }
1682
1683
1684
1685
1686 if (_first_desc == _desc)
1687 d = first_desc;
1688
1689
1690 wil_set_tx_desc_last_tso(d);
1691 *_desc = *d;
1692
1693
1694 wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
1695 *_hdr_desc = *hdr_desc;
1696
1697
1698
1699
1700
1701 vring->ctx[i].skb = skb_get(skb);
1702
1703
1704 used = wil_vring_used_tx(vring);
1705 if (wil_val_in_range(wil->vring_idle_trsh,
1706 used, used + descs_used)) {
1707 txdata->idle += get_cycles() - txdata->last_idle;
1708 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
1709 vring_index, used, used + descs_used);
1710 }
1711
1712
1713
1714
1715
1716
1717 wmb();
1718
1719
1720 wil_vring_advance_head(vring, descs_used);
1721 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
1722
1723
1724
1725
1726 wmb();
1727
1728 wil_w(wil, vring->hwtail, vring->swhead);
1729 return 0;
1730
1731mem_error:
1732 while (descs_used > 0) {
1733 struct wil_ctx *ctx;
1734
1735 i = (swhead + descs_used - 1) % vring->size;
1736 d = (struct vring_tx_desc *)&vring->va[i].tx;
1737 _desc = &vring->va[i].tx;
1738 *d = *_desc;
1739 _desc->dma.status = TX_DMA_STATUS_DU;
1740 ctx = &vring->ctx[i];
1741 wil_txdesc_unmap(dev, d, ctx);
1742 memset(ctx, 0, sizeof(*ctx));
1743 descs_used--;
1744 }
1745err_exit:
1746 return rc;
1747}
1748
1749static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1750 struct vring *vring, struct sk_buff *skb)
1751{
1752 struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
1754 volatile struct vring_tx_desc *_d;
1755 u32 swhead = vring->swhead;
1756 int avail = wil_vring_avail_tx(vring);
1757 int nr_frags = skb_shinfo(skb)->nr_frags;
1758 uint f = 0;
1759 int vring_index = vring - wil->vring_tx;
1760 struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
1761 uint i = swhead;
1762 dma_addr_t pa;
1763 int used;
1764 bool mcast = (vring_index == vif->bcast_vring);
1765 uint len = skb_headlen(skb);
1766
1767 wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
1768 vring_index);
1769
1770 if (unlikely(!txdata->enabled))
1771 return -EINVAL;
1772
1773 if (unlikely(avail < 1 + nr_frags)) {
1774 wil_err_ratelimited(wil,
1775 "Tx ring[%2d] full. No space for %d fragments\n",
1776 vring_index, 1 + nr_frags);
1777 return -ENOMEM;
1778 }
1779 _d = &vring->va[i].tx;
1780
1781 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
1782
1783 wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
1784 skb_headlen(skb), skb->data, &pa);
1785 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
1786 skb->data, skb_headlen(skb), false);
1787
1788 if (unlikely(dma_mapping_error(dev, pa)))
1789 return -EINVAL;
1790 vring->ctx[i].mapped_as = wil_mapped_as_single;
1791
1792 wil_tx_desc_map(d, pa, len, vring_index);
1793 if (unlikely(mcast)) {
1794 d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS);
1795 if (unlikely(len > WIL_BCAST_MCS0_LIMIT))
1796 d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
1797 }
1798
1799 if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
1800 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
1801 vring_index);
1802 goto dma_error;
1803 }
1804
1805 vring->ctx[i].nr_frags = nr_frags;
1806 wil_tx_desc_set_nr_frags(d, nr_frags + 1);
1807
1808
1809 for (; f < nr_frags; f++) {
1810 const struct skb_frag_struct *frag =
1811 &skb_shinfo(skb)->frags[f];
1812 int len = skb_frag_size(frag);
1813
1814 *_d = *d;
1815 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
1816 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1817 (const void *)d, sizeof(*d), false);
1818 i = (swhead + f + 1) % vring->size;
1819 _d = &vring->va[i].tx;
1820 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
1821 DMA_TO_DEVICE);
1822 if (unlikely(dma_mapping_error(dev, pa))) {
1823 wil_err(wil, "Tx[%2d] failed to map fragment\n",
1824 vring_index);
1825 goto dma_error;
1826 }
1827 vring->ctx[i].mapped_as = wil_mapped_as_page;
1828 wil_tx_desc_map(d, pa, len, vring_index);
1829
1830
1831
1832
1833 wil_tx_desc_offload_setup(d, skb);
1834 }
1835
1836 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
1837 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
1838 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
1839 *_d = *d;
1840 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
1841 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1842 (const void *)d, sizeof(*d), false);
1843
1844
1845
1846
1847
1848 vring->ctx[i].skb = skb_get(skb);
1849
1850
1851 used = wil_vring_used_tx(vring);
1852 if (wil_val_in_range(wil->vring_idle_trsh,
1853 used, used + nr_frags + 1)) {
1854 txdata->idle += get_cycles() - txdata->last_idle;
1855 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
1856 vring_index, used, used + nr_frags + 1);
1857 }
1858
1859
1860
1861
1862
1863
1864 wmb();
1865
1866
1867 wil_vring_advance_head(vring, nr_frags + 1);
1868 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
1869 vring->swhead);
1870 trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
1871
1872
1873
1874
1875 wmb();
1876
1877 wil_w(wil, vring->hwtail, vring->swhead);
1878
1879 return 0;
1880 dma_error:
1881
1882 nr_frags = f + 1;
1883 for (f = 0; f < nr_frags; f++) {
1884 struct wil_ctx *ctx;
1885
1886 i = (swhead + f) % vring->size;
1887 ctx = &vring->ctx[i];
1888 _d = &vring->va[i].tx;
1889 *d = *_d;
1890 _d->dma.status = TX_DMA_STATUS_DU;
1891 wil_txdesc_unmap(dev, d, ctx);
1892
1893 memset(ctx, 0, sizeof(*ctx));
1894 }
1895
1896 return -EINVAL;
1897}
1898
1899static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1900 struct vring *vring, struct sk_buff *skb)
1901{
1902 int vring_index = vring - wil->vring_tx;
1903 struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
1904 int rc;
1905
1906 spin_lock(&txdata->lock);
1907
1908 if (test_bit(wil_status_suspending, wil->status) ||
1909 test_bit(wil_status_suspended, wil->status) ||
1910 test_bit(wil_status_resuming, wil->status)) {
1911 wil_dbg_txrx(wil,
1912 "suspend/resume in progress. drop packet\n");
1913 spin_unlock(&txdata->lock);
1914 return -EINVAL;
1915 }
1916
1917 rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
1918 (wil, vif, vring, skb);
1919
1920 spin_unlock(&txdata->lock);
1921
1922 return rc;
1923}
1924
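/* __wil_update_net_queues - stop or wake the netif Tx queues for this VIF
 * based on vring occupancy: stop when a ring falls below the low watermark,
 * wake only once all of the VIF's rings have room again. Caller must hold
 * wil->net_queue_lock.
 */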
1942static inline void __wil_update_net_queues(struct wil6210_priv *wil,
1943 struct wil6210_vif *vif,
1944 struct vring *vring,
1945 bool check_stop)
1946{
1947 int i;
1948
1949 if (unlikely(!vif))
1950 return;
1951
1952 if (vring)
1953 wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
1954 (int)(vring - wil->vring_tx), vif->mid, check_stop,
1955 vif->net_queue_stopped);
1956 else
1957 wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
1958 check_stop, vif->mid, vif->net_queue_stopped);
1959
1960 if (check_stop == vif->net_queue_stopped)
1961
1962 return;
1963
1964 if (check_stop) {
1965 if (!vring || unlikely(wil_vring_avail_low(vring))) {
1966
1967 netif_tx_stop_all_queues(vif_to_ndev(vif));
1968 vif->net_queue_stopped = true;
1969 wil_dbg_txrx(wil, "netif_tx_stop called\n");
1970 }
1971 return;
1972 }
1973
1974
1975 if (test_bit(wil_status_suspending, wil->status) ||
1976 test_bit(wil_status_suspended, wil->status))
1977 return;
1978
1979
1980 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
1981 struct vring *cur_vring = &wil->vring_tx[i];
1982 struct vring_tx_data *txdata = &wil->vring_tx_data[i];
1983
1984 if (txdata->mid != vif->mid || !cur_vring->va ||
1985 !txdata->enabled || cur_vring == vring)
1986 continue;
1987
1988 if (wil_vring_avail_low(cur_vring)) {
1989 wil_dbg_txrx(wil, "vring %d full, can't wake\n",
1990 (int)(cur_vring - wil->vring_tx));
1991 return;
1992 }
1993 }
1994
1995 if (!vring || wil_vring_avail_high(vring)) {
1996
1997 wil_dbg_txrx(wil, "calling netif_tx_wake\n");
1998 netif_tx_wake_all_queues(vif_to_ndev(vif));
1999 vif->net_queue_stopped = false;
2000 }
2001}
2002
2003void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
2004 struct vring *vring, bool check_stop)
2005{
2006 spin_lock(&wil->net_queue_lock);
2007 __wil_update_net_queues(wil, vif, vring, check_stop);
2008 spin_unlock(&wil->net_queue_lock);
2009}
2010
2011void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
2012 struct vring *vring, bool check_stop)
2013{
2014 spin_lock_bh(&wil->net_queue_lock);
2015 __wil_update_net_queues(wil, vif, vring, check_stop);
2016 spin_unlock_bh(&wil->net_queue_lock);
2017}
2018
2019netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2020{
2021 struct wil6210_vif *vif = ndev_to_vif(ndev);
2022 struct wil6210_priv *wil = vif_to_wil(vif);
2023 struct ethhdr *eth = (void *)skb->data;
2024 bool bcast = is_multicast_ether_addr(eth->h_dest);
2025 struct vring *vring;
2026 static bool pr_once_fw;
2027 int rc;
2028
2029 wil_dbg_txrx(wil, "start_xmit\n");
2030 if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
2031 if (!pr_once_fw) {
2032 wil_err(wil, "FW not ready\n");
2033 pr_once_fw = true;
2034 }
2035 goto drop;
2036 }
2037 if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
2038 wil_dbg_ratelimited(wil,
2039 "VIF not connected, packet dropped\n");
2040 goto drop;
2041 }
2042 if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
2043 wil_err(wil, "Xmit in monitor mode not supported\n");
2044 goto drop;
2045 }
2046 pr_once_fw = false;
2047
2048
2049 if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
2050
2051 vring = wil_find_tx_vring_sta(wil, vif, skb);
2052 } else if (bcast) {
2053 if (vif->pbss)
2054
2055
2056
2057 vring = wil_find_tx_bcast_2(wil, vif, skb);
2058 else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
2059
2060 vring = wil_find_tx_bcast_1(wil, vif, skb);
2061 else
2062
2063
2064
2065 vring = wil_find_tx_bcast_2(wil, vif, skb);
2066 } else {
2067
2068 vring = wil_find_tx_ucast(wil, vif, skb);
2069 }
2070 if (unlikely(!vring)) {
2071 wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
2072 goto drop;
2073 }
2074
2075 rc = wil_tx_vring(wil, vif, vring, skb);
2076
2077 switch (rc) {
2078 case 0:
2079
2080 wil_update_net_queues_bh(wil, vif, vring, true);
2081
2082 dev_kfree_skb_any(skb);
2083 return NETDEV_TX_OK;
2084 case -ENOMEM:
2085 return NETDEV_TX_BUSY;
2086 default:
2087 break;
2088 }
2089 drop:
2090 ndev->stats.tx_dropped++;
2091 dev_kfree_skb_any(skb);
2092
2093 return NET_XMIT_DROP;
2094}
2095
2096static inline bool wil_need_txstat(struct sk_buff *skb)
2097{
2098 struct ethhdr *eth = (void *)skb->data;
2099
2100 return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
2101 (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
2102}
2103
2104static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
2105{
2106 if (unlikely(wil_need_txstat(skb)))
2107 skb_complete_wifi_ack(skb, acked);
2108 else
2109 acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
2110}
2111
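/* wil_tx_complete - clean up transmitted skbs on ring @ringid: unmap
 * completed descriptors, update statistics, consume the skbs and, if
 * anything was reclaimed, re-evaluate the netif queues. Returns the number
 * of descriptors cleaned; typically invoked from the Tx NAPI poll.
 */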
2119int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2120{
2121 struct wil6210_priv *wil = vif_to_wil(vif);
2122 struct net_device *ndev = vif_to_ndev(vif);
2123 struct device *dev = wil_to_dev(wil);
2124 struct vring *vring = &wil->vring_tx[ringid];
2125 struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
2126 int done = 0;
2127 int cid = wil->vring2cid_tid[ringid][0];
2128 struct wil_net_stats *stats = NULL;
2129 volatile struct vring_tx_desc *_d;
2130 int used_before_complete;
2131 int used_new;
2132
2133 if (unlikely(!vring->va)) {
2134 wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
2135 return 0;
2136 }
2137
2138 if (unlikely(!txdata->enabled)) {
2139 wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
2140 return 0;
2141 }
2142
2143 wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
2144
2145 used_before_complete = wil_vring_used_tx(vring);
2146
2147 if (cid < WIL6210_MAX_CID)
2148 stats = &wil->sta[cid].stats;
2149
2150 while (!wil_vring_is_empty(vring)) {
2151 int new_swtail;
2152 struct wil_ctx *ctx = &vring->ctx[vring->swtail];
2153
2154
2155
2156
2157
2158 int lf = (vring->swtail + ctx->nr_frags) % vring->size;
2159
2160
2161 _d = &vring->va[lf].tx;
2162 if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
2163 break;
2164
2165 new_swtail = (lf + 1) % vring->size;
2166 while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
2168 u16 dmalen;
2169 struct sk_buff *skb;
2170
2171 ctx = &vring->ctx[vring->swtail];
2172 skb = ctx->skb;
2173 _d = &vring->va[vring->swtail].tx;
2174
2175 *d = *_d;
2176
2177 dmalen = le16_to_cpu(d->dma.length);
2178 trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
2179 d->dma.error);
2180 wil_dbg_txrx(wil,
2181 "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
2182 ringid, vring->swtail, dmalen,
2183 d->dma.status, d->dma.error);
2184 wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
2185 (const void *)d, sizeof(*d), false);
2186
2187 wil_txdesc_unmap(dev, d, ctx);
2188
2189 if (skb) {
2190 if (likely(d->dma.error == 0)) {
2191 ndev->stats.tx_packets++;
2192 ndev->stats.tx_bytes += skb->len;
2193 if (stats) {
2194 stats->tx_packets++;
2195 stats->tx_bytes += skb->len;
2196 }
2197 } else {
2198 ndev->stats.tx_errors++;
2199 if (stats)
2200 stats->tx_errors++;
2201 }
2202 wil_consume_skb(skb, d->dma.error == 0);
2203 }
2204 memset(ctx, 0, sizeof(*ctx));
2205
2206
2207
2208
2209
2210 wmb();
2211
2212
2213
2214
2215
2216 vring->swtail = wil_vring_next_tail(vring);
2217 done++;
2218 }
2219 }
2220
2221
2222 used_new = wil_vring_used_tx(vring);
2223 if (wil_val_in_range(wil->vring_idle_trsh,
2224 used_new, used_before_complete)) {
2225 wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
2226 ringid, used_before_complete, used_new);
2227 txdata->last_idle = get_cycles();
2228 }
2229
2230
2231 if (done)
2232 wil_update_net_queues(wil, vif, vring, false);
2233
2234 return done;
2235}
2236