#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
#include "txrx_edma.h"

bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");

/* Drop Tx packets in case Tx ring is full */
bool drop_if_ring_full;

static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}
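
/* Note: with rx_align_2, Rx buffers start at a 4*n+2 address, so after the
 * 14-byte 802.3 header the IP header lands 4-byte aligned. The cost is that
 * HW leaves the 6-byte SNAP header in the buffer (hence snaplen of 6), which
 * wil_vring_reap_rx() later strips by shifting DA/SA forward over it.
 */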

static inline int wil_ring_wmark_low(struct wil_ring *ring)
{
	return ring->size / 8;
}

static inline int wil_ring_wmark_high(struct wil_ring *ring)
{
	return ring->size / 4;
}

static inline int wil_ring_avail_low(struct wil_ring *ring)
{
	return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
}

static inline int wil_ring_avail_high(struct wil_ring *ring)
{
	return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
}
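
/* Worked example: for a Tx ring of 1024 descriptors, wmark_low = 128 and
 * wmark_high = 256. __wil_update_net_queues() stops the net queues when
 * fewer than 128 descriptors remain available and wakes them only once more
 * than 256 are free again; the gap between the two watermarks provides
 * hysteresis against rapid stop/wake flapping.
 */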

/* returns true if all Tx rings are empty, i.e. Tx traffic is quiesced */
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
	int i;
	unsigned long data_comp_to;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		struct wil_ring *vring = &wil->ring_tx[i];
		int vring_index = vring - wil->ring_tx;
		struct wil_ring_tx_data *txdata =
			&wil->ring_tx_data[vring_index];

		spin_lock(&txdata->lock);

		if (!vring->va || !txdata->enabled) {
			spin_unlock(&txdata->lock);
			continue;
		}

		data_comp_to = jiffies + msecs_to_jiffies(
			WIL_DATA_COMPLETION_TO_MS);
		if (test_bit(wil_status_napi_en, wil->status)) {
			while (!wil_ring_is_empty(vring)) {
				if (time_after(jiffies, data_comp_to)) {
					wil_dbg_pm(wil,
						   "TO waiting for idle tx\n");
					spin_unlock(&txdata->lock);
					return false;
				}
				wil_dbg_ratelimited(wil,
						    "tx vring is not empty -> NAPI\n");
				/* drop the lock so the NAPI Tx poll can
				 * complete outstanding descriptors, then
				 * re-check under the lock
				 */
				spin_unlock(&txdata->lock);
				napi_synchronize(&wil->napi_tx);
				msleep(20);
				spin_lock(&txdata->lock);
				if (!vring->va || !txdata->enabled)
					break;
			}
		}

		spin_unlock(&txdata->lock);
	}

	return true;
}

static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "vring_alloc:\n");

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}

	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d =
			&vring->va[i].tx.legacy;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}
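
/* Sizing example: each vring descriptor is exactly 32 bytes (enforced by the
 * BUILD_BUG_ON above), so a 512-entry ring takes 512 * 32 = 16KB of coherent
 * DMA memory, plus 512 wil_ctx entries for the driver-side shadow state.
 */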

static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
			     struct wil_ctx *ctx)
{
	struct vring_tx_desc *d = &desc->legacy;
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (!vring->is_rx) {
		int vring_index = vring - wil->ring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_ring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (!vring->is_rx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
				&vring->va[vring->swtail].tx.legacy;

			ctx = &vring->ctx[vring->swtail];
			if (!ctx) {
				wil_dbg_txrx(wil,
					     "ctx(%d) was already completed\n",
					     vring->swtail);
				vring->swtail = wil_ring_next_tail(vring);
				continue;
			}
			*d = *_d;
			wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_ring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
				&vring->va[vring->swhead].rx.legacy;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_ring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}

/* Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	/* Make sure that the network stack calculates checksum for packets
	 * which failed the HW checksum calculation
	 */
	skb->ip_summed = CHECKSUM_NONE;

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length, b11, error - not relevant when posting an Rx buffer */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}
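
/* Buffer budget: sz = rx_buf_len + ETH_HLEN + wil_rx_snaplen(), i.e. the
 * configured Rx payload length, plus the 14-byte 802.3 header, plus the
 * 6-byte SNAP header when rx_align_2 is in effect.
 */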

/* Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap *rtap;
	int rtap_len = sizeof(struct wil6210_rtap);
	struct ieee80211_channel *ch = wil->monitor_chandef.chan;

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap = skb_push(skb, rtap_len);
	memset(rtap, 0, rtap_len);

	rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap->rthdr.it_len = cpu_to_le16(rtap_len);
	rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap->chnl_flags = cpu_to_le16(0);

	rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap->mcs_flags = 0;
	rtap->mcs_index = wil_rxdesc_mcs(d);
}

static bool wil_is_rx_idle(struct wil6210_priv *wil)
{
	struct vring_rx_desc *_d;
	struct wil_ring *ring = &wil->ring_rx;

	_d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
	if (_d->dma.status & RX_DMA_STATUS_DU)
		return false;

	return true;
}
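
/* The DU (descriptor used) bit is written back by HW once it has filled a
 * descriptor. If the descriptor at swhead does not carry it, nothing new
 * arrived since the last reap and the Rx path is considered idle.
 */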

static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int mid = wil_rxdesc_mid(d);
	struct wil6210_vif *vif = wil->vifs[mid];
	/* cid from DMA descriptor is limited to 3 bits.
	 * In case of cid>=8, the value would be cid modulo 8 and we need to
	 * find real cid by locating the transmitter (ta) inside sta array
	 */
	int cid = wil_rxdesc_cid(d);
	unsigned int snaplen = wil_rx_snaplen();
	struct ieee80211_hdr_3addr *hdr;
	int i;
	unsigned char *ta;
	u8 ftype;

	/* in monitor mode there are no connections */
	if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
		return cid;

	ftype = wil_rxdesc_ftype(d) << 2;
	if (likely(ftype == IEEE80211_FTYPE_DATA)) {
		if (unlikely(skb->len < ETH_HLEN + snaplen)) {
			wil_err_ratelimited(wil,
					    "Short data frame, len = %d\n",
					    skb->len);
			return -ENOENT;
		}
		ta = wil_skb_get_sa(skb);
	} else {
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			wil_err_ratelimited(wil, "Short frame, len = %d\n",
					    skb->len);
			return -ENOENT;
		}
		hdr = (void *)skb->data;
		ta = hdr->addr2;
	}

	if (wil->max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
		return cid;

	/* assuming no concurrency between AP interfaces and STA interfaces.
	 * multista is used only in P2P_GO or AP
	 */
	if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
	    vif->wdev.iftype != NL80211_IFTYPE_AP)
		return cid;

	/* find the real cid: stations sharing the same cid modulo 8 are
	 * candidates, so search in steps of 8 for a matching transmitter
	 */
	for (i = cid; i < wil->max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
		if (wil->sta[i].status != wil_sta_unused &&
		    ether_addr_equal(wil->sta[i].addr, ta)) {
			cid = i;
			break;
		}
	}
	if (i >= wil->max_assoc_sta) {
		wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
				    ta, vif->wdev.iftype, ftype, skb->len);
		cid = -ENOENT;
	}

	return cid;
}
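
/* Worked example: the Rx descriptor carries only cid % 8 (a 3-bit field).
 * With max_assoc_sta = 16 and a descriptor cid of 3, the candidates are
 * sta[3] and sta[11]; the loop above picks the entry whose address matches
 * the frame's transmitter address (ta).
 */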

/* reap 1 frame from @swhead
 *
 * Rx descriptor contains the actual number of bytes received.
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct wil_ring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif;
	struct net_device *ndev;
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid, mid;
	int i;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));

again:
	if (unlikely(wil_ring_is_empty(vring)))
		return NULL;

	i = (int)vring->swhead;
	_d = &vring->va[i].rx.legacy;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_ring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		goto again;
	}
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	mid = wil_rxdesc_mid(d);
	vif = wil->vifs[mid];

	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
			     mid);
		kfree_skb(skb);
		goto again;
	}
	ndev = vif_to_ndev(vif);
	if (unlikely(dmalen > sz)) {
		wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
				    dmalen);
		kfree_skb(skb);
		goto again;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	cid = wil_rx_get_cid_by_skb(wil, skb);
	if (cid == -ENOENT) {
		kfree_skb(skb);
		goto again;
	}
	wil_skb_set_cid(skb, (u8)cid);
	stats = &wil->sta[cid].stats;

	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
		stats->rx_per_mcs[stats->last_mcs_rx]++;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;

	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		u8 fc1 = wil_rxdesc_fc1(d);
		int tid = wil_rxdesc_tid(d);
		u16 seq = wil_rxdesc_seq(d);

		wil_dbg_txrx(wil,
			     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		stats->rx_non_data_frame++;
		if (wil_is_back_req(fc1)) {
			wil_dbg_txrx(wil,
				     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
				     mid, cid, tid, seq);
			wil_rx_bar(wil, vif, cid, tid, seq);
		} else {
			/* print again all info. One can enable only this
			 * without overhead for printing every Rx frame
			 */
			wil_dbg_txrx(wil,
				     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
				     fc1, mid, cid, tid, seq);
			wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);
			wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
					  skb->data, skb_headlen(skb), false);
		}
		kfree_skb(skb);
		goto again;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW does not understand Microsoft IP stack that
		 * mis-calculates TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
		else
			stats->rx_csum_err++;
	}

	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}

/* allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 * Note: we have a single RX queue for servicing all VIFs, but we
 * allocate skbs with headroom according to main interface only. This
 * means it will not work with monitor interface together with other VIFs.
 * Currently we only support monitor interface on its own without other
 * interfaces, and we will need to fix this code once we add support.
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil->main_ndev;
	struct wil_ring *v = &wil->ring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_ring_next_tail(v),
	     (next_tail != v->swhead) && (count-- > 0);
	     v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
					    rc, v->swtail);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, v->hwtail, v->swtail);

	return rc;
}

/**
 * reverse_memcmp - Compare two areas of memory, in reverse order
 * @cs: One area of memory
 * @ct: Another area of memory
 * @count: The size of the areas
 *
 * Like memcmp(), but the comparison starts from the last byte; useful
 * for comparing multi-byte counters stored least-significant-byte first,
 * such as the GCMP packet number (PN) in the Rx descriptor.
 */
int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
	     --su1, --su2, count--) {
		res = *su1 - *su2;
		if (res)
			break;
	}
	return res;
}
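
/* Example: PN bytes are stored least-significant first, so for 6-byte PNs
 * new = 02 00 00 00 00 00 and last = 01 00 00 00 00 00 the comparison runs
 * from byte 5 down to byte 0 and only decides at the final byte (positive,
 * new > last). Any result <= 0 is treated as a replay by
 * wil_rx_crypto_check() below.
 */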

static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_skb_get_cid(skb);
	int tid = wil_rxdesc_tid(d);
	int key_id = wil_rxdesc_key_id(d);
	int mc = wil_rxdesc_mcast(d);
	struct wil_sta_info *s = &wil->sta[cid];
	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
				      &s->tid_crypto_rx[tid];
	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
	const u8 *pn = (u8 *)&d->mac.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}

static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
			      struct wil_net_stats *stats)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);

	if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
	    (d->dma.error & RX_DMA_ERROR_MIC)) {
		stats->rx_mic_error++;
		wil_dbg_txrx(wil, "MIC error, dropping packet\n");
		return -EFAULT;
	}

	return 0;
}

static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
				    int *security)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);

	*cid = wil_skb_get_cid(skb);
	*security = wil_rxdesc_security(d);
}

/* Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_vif *vif = ndev_to_vif(ndev);
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = vif_to_wdev(vif);
	unsigned int len = skb->len;
	int cid;
	int security;
	u8 *sa, *da = wil_skb_get_da(skb);
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(da);
	struct wil_net_stats *stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED] = "GRO_MERGED",
		[GRO_MERGED_FREE] = "GRO_MERGED_FREE",
		[GRO_HELD] = "GRO_HELD",
		[GRO_NORMAL] = "GRO_NORMAL",
		[GRO_DROP] = "GRO_DROP",
		[GRO_CONSUMED] = "GRO_CONSUMED",
	};

	wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);

	stats = &wil->sta[cid].stats;

	skb_orphan(skb);

	if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
		rc = GRO_DROP;
		dev_kfree_skb(skb);
		stats->rx_replay++;
		goto stats;
	}

	/* check errors reported by HW and update statistics */
	if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
		dev_kfree_skb(skb);
		return;
	}

	if (wdev->iftype == NL80211_IFTYPE_STATION) {
		sa = wil_skb_get_sa(skb);
		if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
			/* mcast packet looped back to us */
			rc = GRO_DROP;
			dev_kfree_skb(skb);
			goto stats;
		}
	} else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, vif->mid, da);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}

	if (skb) {
		skb->protocol = eth_type_trans(skb, ndev);
		skb->dev = ndev;
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
stats:
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}

/* Proceed all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil->main_ndev;
	struct wireless_dev *wdev = ndev->ieee80211_ptr;
	struct wil_ring *v = &wil->ring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		/* in monitor mode, skip reordering and pass the frame as-is */
		if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}

static void wil_rx_buf_len_init(struct wil6210_priv *wil)
{
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
	if (mtu_max > wil->rx_buf_len) {
		/* do not allow RX buffers to be smaller than mtu_max, for
		 * efficient reassembly of A-MSDU
		 */
		wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
		wil->rx_buf_len = mtu_max;
	}
}

static int wil_rx_init(struct wil6210_priv *wil, uint order)
{
	struct wil_ring *vring = &wil->ring_rx;
	int rc;

	wil_dbg_misc(wil, "rx_init\n");

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	wil_rx_buf_len_init(wil);

	vring->size = 1 << order;
	vring->is_rx = true;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
 err_free:
	wil_vring_free(wil, vring);

	return rc;
}

static void wil_rx_fini(struct wil6210_priv *wil)
{
	struct wil_ring *vring = &wil->ring_rx;

	wil_dbg_misc(wil, "rx_fini\n");

	if (vring->va)
		wil_vring_free(wil, vring);
}

static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
			   u32 len, int vring_index)
{
	struct vring_tx_desc *d = &desc->legacy;

	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7: ip_version 0-IP6 1-IP4 */
	d->dma.b11 = 0;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = 0;
	txdata->enabled = 0;
	txdata->idle = 0;
	txdata->last_idle = 0;
	txdata->begin = 0;
	txdata->agg_wsize = 0;
	txdata->agg_timeout = 0;
	txdata->agg_amsdu = 0;
	txdata->addba_in_progress = false;
	txdata->mid = U8_MAX;
	spin_unlock_bh(&txdata->lock);
}

static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
			     int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply = {
		.cmd = {.status = WMI_FW_STATUS_FAILURE},
	};
	struct wil_ring *vring = &wil->ring_tx[id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];

	if (cid >= WIL6210_RX_DESC_MAX_CID) {
		cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
		cmd.vring_cfg.cid = cid;
		cmd.vring_cfg.tid = tid;
	} else {
		cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
	}

	wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->is_rx = false;
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->ring2cid_tid[id][0] = cid;
	wil->ring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!vif->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
		      WIL_WMI_CALL_GENERAL_TO_MS);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->mid = vif->mid;
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	if (txdata->dot1x_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring);
	wil->ring2cid_tid[id][0] = wil->max_assoc_sta;
	wil->ring2cid_tid[id][1] = 0;

 out:

	return rc;
}

static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
			       int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = 0,
			},
			.ringid = ring_id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply = {
		.cmd = {.status = WMI_FW_STATUS_FAILURE},
	};
	struct wil_ring *vring = &wil->ring_tx[ring_id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
		     cid, tid);
	lockdep_assert_held(&wil->mutex);

	if (!vring->va) {
		wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
		return -EINVAL;
	}

	if (wil->ring2cid_tid[ring_id][0] != cid ||
	    wil->ring2cid_tid[ring_id][1] != tid) {
		wil_err(wil, "ring info does not match cid=%u tid=%u\n",
			wil->ring2cid_tid[ring_id][0],
			wil->ring2cid_tid[ring_id][1]);
	}

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
		      WIL_WMI_CALL_GENERAL_TO_MS);
	if (rc)
		goto fail;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx modify failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto fail;
	}

	/* set BA aggregation window size to 0 to force a new BA with the
	 * new AP
	 */
	txdata->agg_wsize = 0;
	if (txdata->dot1x_open && agg_wsize >= 0)
		wil_addba_tx_request(wil, ring_id, agg_wsize);

	return 0;
fail:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
	wil->ring2cid_tid[ring_id][1] = 0;
	return rc;
}

int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wmi_bcast_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply = {
		.cmd = {.status = WMI_FW_STATUS_FAILURE},
	};
	struct wil_ring *vring = &wil->ring_tx[id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];

	wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->is_rx = false;
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->ring2cid_tid[id][0] = wil->max_assoc_sta; /* CID */
	wil->ring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!vif->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
		      &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
		      WIL_WMI_CALL_GENERAL_TO_MS);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->mid = vif->mid;
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	return 0;
 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring);
 out:

	return rc;
}

static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
					  struct wil6210_vif *vif,
					  struct sk_buff *skb)
{
	int i, cid;
	const u8 *da = wil_skb_get_da(skb);
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	cid = wil_find_cid(wil, vif->mid, da);

	if (cid < 0 || cid >= wil->max_assoc_sta)
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;
		if (wil->ring2cid_tid[i][0] == cid) {
			struct wil_ring *v = &wil->ring_tx[i];
			struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];

			wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
				     da, i);
			if (v->va && txdata->enabled) {
				return v;
			} else {
				wil_dbg_txrx(wil,
					     "find_tx_ucast: vring[%d] not valid\n",
					     i);
				return NULL;
			}
		}
	}

	return NULL;
}

static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
		       struct wil_ring *ring, struct sk_buff *skb);

static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
					     struct wil6210_vif *vif,
					     struct sk_buff *skb)
{
	struct wil_ring *ring;
	int i;
	u8 cid;
	struct wil_ring_tx_data *txdata;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	/* In the STA mode, it is expected to have only 1 VRING
	 * for the AP we connected to.
	 * find 1-st vring eligible for this skb and use it.
	 */
	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		ring = &wil->ring_tx[i];
		txdata = &wil->ring_tx_data[i];
		if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
			continue;

		cid = wil->ring2cid_tid[i][0];
		if (cid >= wil->max_assoc_sta) /* skip BCAST */
			continue;

		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return ring;
	}

	wil_dbg_txrx(wil, "Tx while no rings active?\n");

	return NULL;
}

/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *    use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *    Find 1-st vring and return it;
 *    duplicate skb and send it to other active vrings;
 *    in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for PBSS
 */
static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
					    struct wil6210_vif *vif,
					    struct sk_buff *skb)
{
	struct wil_ring *v;
	struct wil_ring_tx_data *txdata;
	int i = vif->bcast_ring;

	if (i < 0)
		return NULL;
	v = &wil->ring_tx[i];
	txdata = &wil->ring_tx_data[i];
	if (!v->va || !txdata->enabled)
		return NULL;
	if (!wil->ring_tx_data[i].dot1x_open &&
	    skb->protocol != cpu_to_be16(ETH_P_PAE))
		return NULL;

	return v;
}

static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	u8 *da = wil_skb_get_da(skb);
	int cid = wil->ring2cid_tid[vring_index][0];

	ether_addr_copy(da, wil->sta[cid].addr);
}

static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					    struct wil6210_vif *vif,
					    struct sk_buff *skb)
{
	struct wil_ring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	const u8 *src = wil_skb_get_sa(skb);
	struct wil_ring_tx_data *txdata, *txdata2;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	/* find 1-st vring eligible for data */
	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->ring_tx[i];
		txdata = &wil->ring_tx_data[i];
		if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
			continue;

		cid = wil->ring2cid_tid[i][0];
		if (cid >= wil->max_assoc_sta) /* skip BCAST */
			continue;
		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->ring_tx[i];
		txdata2 = &wil->ring_tx_data[i];
		if (!v2->va || txdata2->mid != vif->mid)
			continue;
		cid = wil->ring2cid_tid[i][0];
		if (cid >= wil->max_assoc_sta) /* skip BCAST */
			continue;
		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_ring(wil, vif, v2, skb2);
			/* successful call to wil_tx_ring takes skb2 ref */
			dev_kfree_skb_any(skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}

static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
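
/* The nr_frags field of the MAC descriptor holds the total number of
 * descriptors belonging to the packet; the same count is mirrored into
 * ctx->nr_frags so that wil_tx_complete() can jump straight to the last
 * descriptor - the only one for which HW sets the DU bit - instead of
 * polling every fragment.
 */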

/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
					  struct sk_buff *skb,
					  int tso_desc_type, bool is_ipv4,
					  int tcp_hdr_len, int skb_net_hdr_len)
{
	d->dma.b11 = ETH_HLEN; /* MAC header length */
	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
	/* L4 header len: TCP header length */
	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

	/* Setup TSO: bit and desc type */
	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
		(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

	d->dma.ip_length = skb_net_hdr_len;
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}

/* Sets the descriptor @d up for csum offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 *
 * It is very similar to wil_tx_desc_offload_setup_tso above. This
 * is "if unrolling" to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}

static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}

static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
	d->dma.d0 |= wil_tso_type_lst <<
		     DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}
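
/* TSO descriptor chain sketch: a single header descriptor is followed by
 * groups of data descriptors, one group per MSS worth of payload (a group
 * may span several descriptors when an skb fragment is smaller than MSS).
 * Each group is closed with wil_tx_last_desc(); the final data descriptor
 * is additionally tagged wil_tso_type_lst (see wil_set_tx_desc_last_tso()
 * above).
 */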

static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
			      struct wil_ring *vring, struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);

	/* point to descriptors in shared memory */
	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
				      *_first_desc = NULL;

	/* pointers to shadow descriptors */
	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
			     *first_desc = &first_desc_mem;

	/* pointer to shadow descriptors' context */
	struct wil_ctx *hdr_ctx, *first_ctx = NULL;

	int descs_used = 0; /* total number of used descriptors */
	int sg_desc_cnt = 0; /* number of descriptors for current mss */

	u32 swhead = vring->swhead;
	int used, avail = wil_ring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 1;
	int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
	int f, len, hdrlen, headlen;
	int vring_index = vring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	const skb_frag_t *frag = NULL;
	int rem_data = mss;
	int lenmss;
	int hdr_compensation_need = true;
	int desc_tso_type = wil_tso_type_first;
	bool is_ipv4;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int gso_type;
	int rc = -EINVAL;

	wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
		     vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	/* A typical page 4K is 3-4 payloads, we assume each fragment
	 * is a full payload, that's how min_desc_required has been
	 * calculated. In real we might need more or less descriptors,
	 * this is the initial check only.
	 */
	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, min_desc_required);
		return -ENOMEM;
	}

	/* Header Length = MAC header len + IP header len + TCP header len */
	hdrlen = ETH_HLEN +
		(int)skb_network_header_len(skb) +
		tcp_hdrlen(skb);

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		/* TCP v4, zero out the IP length and IPv4 checksum fields
		 * as required by the offloading doc
		 */
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		/* TCP v6, zero out the payload length */
		ipv6_hdr(skb)->payload_len = 0;
		is_ipv4 = false;
		break;
	default:
		/* other than TCPv4 or TCPv6 types are not supported for TSO.
		 * It is also illegal for both to be set simultaneously
		 */
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	_hdr_desc = &vring->va[i].tx.legacy;

	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb head DMA map error\n");
		goto err_exit;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
				  hdrlen, vring_index);
	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
				      tcp_hdr_len, skb_net_hdr_len);
	wil_tx_last_desc(hdr_desc);

	vring->ctx[i].mapped_as = wil_mapped_as_single;
	hdr_ctx = &vring->ctx[i];

	descs_used++;
	headlen = skb_headlen(skb) - hdrlen;

	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
		if (headlen) {
			len = headlen;
			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
				     len);
		} else {
			frag = &skb_shinfo(skb)->frags[f];
			len = frag->size;
			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
		}

		while (len) {
			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d\n",
				     len, rem_data, descs_used);

			if (descs_used == avail) {
				wil_err_ratelimited(wil, "TSO: ring overflow\n");
				rc = -ENOMEM;
				goto mem_error;
			}

			lenmss = min_t(int, rem_data, len);
			i = (swhead + descs_used) % vring->size;
			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);

			if (!headlen) {
				pa = skb_frag_dma_map(dev, frag,
						      frag->size - len, lenmss,
						      DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_page;
			} else {
				pa = dma_map_single(dev,
						    skb->data +
						    skb_headlen(skb) - headlen,
						    lenmss,
						    DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_single;
				headlen -= lenmss;
			}

			if (unlikely(dma_mapping_error(dev, pa))) {
				wil_err(wil, "TSO: DMA map page error\n");
				goto mem_error;
			}

			_desc = &vring->va[i].tx.legacy;

			if (!_first_desc) {
				_first_desc = _desc;
				first_ctx = &vring->ctx[i];
				d = first_desc;
			} else {
				d = &desc_mem;
			}

			wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
						  pa, lenmss, vring_index);
			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
						      is_ipv4, tcp_hdr_len,
						      skb_net_hdr_len);

			/* use tso_type_first only once */
			desc_tso_type = wil_tso_type_mid;

			descs_used++;  /* desc used so far */
			sg_desc_cnt++; /* desc used for this segment */
			len -= lenmss;
			rem_data -= lenmss;

			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
				     len, rem_data, descs_used, sg_desc_cnt);

			/* Close the segment if reached mss size or last frag */
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				if (hdr_compensation_need) {
					/* first segment include hdr desc for
					 * release
					 */
					hdr_ctx->nr_frags = sg_desc_cnt;
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt +
								 1);
					hdr_compensation_need = false;
				} else {
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt);
				}
				first_ctx->nr_frags = sg_desc_cnt - 1;

				wil_tx_last_desc(d);

				/* first descriptor may also be the last
				 * for this mss - make sure not to copy
				 * it twice
				 */
				if (first_desc != d)
					*_first_desc = *first_desc;

				/* last descriptor will be copied at the end
				 * of this TSO segment's processing
				 */
				if (f < nr_frags - 1 || len > 0)
					*_desc = *d;

				rem_data = mss;
				_first_desc = NULL;
				sg_desc_cnt = 0;
			} else if (first_desc != d) /* update mid descriptors */
				*_desc = *d;
		}
	}

	if (!_desc)
		goto mem_error;

	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr) */
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_ring_used_tx(vring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_ring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

mem_error:
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used - 1) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
		_desc = &vring->va[i].tx.legacy;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}

static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
			 struct wil_ring *ring, struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = ring->swhead;
	int avail = wil_ring_avail_tx(ring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (ring_index == vif->bcast_ring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
		     skb->len, ring_index, nr_frags);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &ring->va[i].tx.legacy;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	ring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
				  ring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			ring_index);
		goto dma_error;
	}

	ring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % ring->size;
		_d = &ring->va[i].tx.legacy;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				ring_index);
			goto dma_error;
		}
		ring->ctx[i].mapped_as = wil_mapped_as_page;
		wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
					  pa, len, ring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	ring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + nr_frags + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_ring_advance_head(ring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
		     ring->swhead);
	trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, ring->hwtail, ring->swhead);

	return 0;
 dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % ring->size;
		ctx = &ring->ctx[i];
		_d = &ring->va[i].tx.legacy;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil->txrx_ops.tx_desc_unmap(dev,
					    (union wil_tx_desc *)d,
					    ctx);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}

static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
		       struct wil_ring *ring, struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int rc;

	spin_lock(&txdata->lock);

	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status) ||
	    test_bit(wil_status_resuming, wil->status)) {
		wil_dbg_txrx(wil,
			     "suspend/resume in progress. drop packet\n");
		spin_unlock(&txdata->lock);
		return -EINVAL;
	}

	rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
	     (wil, vif, ring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}

/* Check status of tx vrings and stop/wake net queues if needed
 * It will start/stop net queues of a specific VIF net_device.
 *
 * This function does one of two checks:
 * In case check_stop is true, will check if net queues need to be stopped.
 * If the conditions for stopping are met, netif_tx_stop_all_queues() is
 * called.
 * In case check_stop is false, will check if net queues need to be waked.
 * If the conditions for waking are met, netif_tx_wake_all_queues() is
 * called.
 * vring is the vring which is currently being modified by either adding
 * descriptors (tx) into it or removing descriptors (tx complete) from it.
 * Can be null when irrelevant (e.g. connect/disconnect events).
 *
 * The implementation is to stop net queues if modified vring has low
 * descriptor availability. Wake if all vrings are not in low descriptor
 * availability and modified vring has high descriptor availability.
 */
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
					   struct wil6210_vif *vif,
					   struct wil_ring *ring,
					   bool check_stop)
{
	int i;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	if (unlikely(!vif))
		return;

	if (ring)
		wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
			     (int)(ring - wil->ring_tx), vif->mid, check_stop,
			     vif->net_queue_stopped);
	else
		wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
			     check_stop, vif->mid, vif->net_queue_stopped);

	if (ring && drop_if_ring_full)
		/* no need to stop/wake net queues */
		return;

	if (check_stop == vif->net_queue_stopped)
		/* net queues already in desired state */
		return;

	if (check_stop) {
		if (!ring || unlikely(wil_ring_avail_low(ring))) {
			/* not enough room in the vring */
			netif_tx_stop_all_queues(vif_to_ndev(vif));
			vif->net_queue_stopped = true;
			wil_dbg_txrx(wil, "netif_tx_stop called\n");
		}
		return;
	}

	/* Do not wake the queues in suspend flow */
	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status))
		return;

	/* check wake */
	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		struct wil_ring *cur_ring = &wil->ring_tx[i];
		struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];

		if (txdata->mid != vif->mid || !cur_ring->va ||
		    !txdata->enabled || cur_ring == ring)
			continue;

		if (wil_ring_avail_low(cur_ring)) {
			wil_dbg_txrx(wil, "ring %d full, can't wake\n",
				     (int)(cur_ring - wil->ring_tx));
			return;
		}
	}

	if (!ring || wil_ring_avail_high(ring)) {
		/* enough room in the ring */
		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
		netif_tx_wake_all_queues(vif_to_ndev(vif));
		vif->net_queue_stopped = false;
	}
}

void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
			   struct wil_ring *ring, bool check_stop)
{
	spin_lock(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vif, ring, check_stop);
	spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
			      struct wil_ring *ring, bool check_stop)
{
	spin_lock_bh(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vif, ring, check_stop);
	spin_unlock_bh(&wil->net_queue_lock);
}

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_vif *vif = ndev_to_vif(ndev);
	struct wil6210_priv *wil = vif_to_wil(vif);
	const u8 *da = wil_skb_get_da(skb);
	bool bcast = is_multicast_ether_addr(da);
	struct wil_ring *ring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "start_xmit\n");
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
		wil_dbg_ratelimited(wil,
				    "VIF not connected, packet dropped\n");
		goto drop;
	}
	if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find ring */
	if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
		/* in STA mode (ESS), all to same ring (to AP) */
		ring = wil_find_tx_ring_sta(wil, vif, skb);
	} else if (bcast) {
		if (vif->pbss)
			/* in pbss, no bcast ring - duplicate skb in
			 * all stations rings
			 */
			ring = wil_find_tx_bcast_2(wil, vif, skb);
		else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
			/* AP has a dedicated bcast ring */
			ring = wil_find_tx_bcast_1(wil, vif, skb);
		else
			/* unexpected combination, fallback to duplicating
			 * the skb in all stations rings
			 */
			ring = wil_find_tx_bcast_2(wil, vif, skb);
	} else {
		/* unicast, find specific ring by dest. address */
		ring = wil_find_tx_ucast(wil, vif, skb);
	}
	if (unlikely(!ring)) {
		wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_ring(wil, vif, ring, skb);

	switch (rc) {
	case 0:
		/* shall we stop net queues? */
		wil_update_net_queues_bh(wil, vif, ring, true);
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		if (drop_if_ring_full)
			goto drop;
		return NETDEV_TX_BUSY;
	default:
		break;
	}
 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}

void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
			 struct wil_sta_info *sta)
{
	int skb_time_us;
	int bin;

	if (!wil->tx_latency)
		return;

	if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
		return;

	skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
	bin = skb_time_us / wil->tx_latency_res;
	bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);

	wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
	sta->tx_latency_bins[bin]++;
	sta->stats.tx_latency_total_us += skb_time_us;
	if (skb_time_us < sta->stats.tx_latency_min_us)
		sta->stats.tx_latency_min_us = skb_time_us;
	if (skb_time_us > sta->stats.tx_latency_max_us)
		sta->stats.tx_latency_max_us = skb_time_us;
}
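
/* Binning example: with tx_latency_res of 1000us, a 3.2ms skb lands in
 * bin 3; anything beyond the last bin is clamped into
 * WIL_NUM_LATENCY_BINS - 1, so the histogram tail also counts outliers.
 */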

/* Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_vif *vif, int ringid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct net_device *ndev = vif_to_ndev(vif);
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *vring = &wil->ring_tx[ringid];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
	int done = 0;
	int cid = wil->ring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);

	used_before_complete = wil_ring_used_tx(vring);

	if (cid < wil->max_assoc_sta)
		stats = &wil->sta[cid].stats;

	while (!wil_ring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/* For the fragmented skb, HW will set DU bit only for the
		 * last fragment. look for it.
		 * In TSO the first DU will include hdr desc
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;

		/* TODO: check we are not past head */
		_d = &vring->va[lf].tx.legacy;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx.legacy;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil->txrx_ops.tx_desc_unmap(dev,
						    (union wil_tx_desc *)d,
						    ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;

						wil_tx_latency_calc(wil, skb,
							&wil->sta[cid]);
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_ring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_ring_used_tx(vring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* shall we wake net queues? */
	if (done)
		wil_update_net_queues(wil, vif, vring, false);

	return done;
}

static inline int wil_tx_init(struct wil6210_priv *wil)
{
	return 0;
}

static inline void wil_tx_fini(struct wil6210_priv *wil) {}

static void wil_get_reorder_params(struct wil6210_priv *wil,
				   struct sk_buff *skb, int *tid, int *cid,
				   int *mid, u16 *seq, int *mcast, int *retry)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);

	*tid = wil_rxdesc_tid(d);
	*cid = wil_skb_get_cid(skb);
	*mid = wil_rxdesc_mid(d);
	*seq = wil_rxdesc_seq(d);
	*mcast = wil_rxdesc_mcast(d);
	*retry = wil_rxdesc_retry(d);
}

void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation;
	/* TX ops */
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
	wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
	wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
	wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
	wil->txrx_ops.ring_fini_tx = wil_vring_free;
	wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
	wil->txrx_ops.tx_init = wil_tx_init;
	wil->txrx_ops.tx_fini = wil_tx_fini;
	wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
	wil->txrx_ops.rx_error_check = wil_rx_error_check;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
	wil->txrx_ops.rx_fini = wil_rx_fini;
}