#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

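/* Look up the sk_buff that was DMA-mapped at the given physical address.
 * Used by the in-order RX path, which addresses ring buffers by paddr
 * rather than by ring index.
 */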
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

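/* Allocate, DMA-map and post up to @num receive buffers to the RX ring,
 * starting at the current alloc index. Caller must hold rx_ring.lock (or be
 * the only context touching the ring, e.g. during setup).
 */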
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The fill level must stay below half the ring size: with in-order
	 * (full RX reorder) firmware the host gets no explicit signal when
	 * buffers have been consumed, so keeping the ring at most half full
	 * guarantees replenishment can never overrun buffers still owned by
	 * the hardware.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 (u32)paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/* Make sure the rx buffer is updated before the available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring in one go is not optimal: the context
	 * doing the replenish can be preempted and refilling a nearly empty
	 * ring can take a long time. Instead, top the ring up in bounded
	 * chunks (ATH10K_HTT_MAX_NUM_REFILL) and reschedule a short timer to
	 * finish the job if a deficit remains.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/* Failed to fill it to the desired level - start a timer and
		 * try again next time. As long as enough buffers are left in
		 * the ring for another A-MPDU rx, no special recovery is
		 * needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

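/* Dequeue the next buffer from the RX ring at the software read index and
 * unmap it for CPU access. Must be called with rx_ring.lock held.
 */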
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - success, 1 - success w/ chained msdus */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/* Sanity check - confirm the HW is finished filling in the
		 * rx data. If the HW and SW are working correctly, then it's
		 * guaranteed that the HW's MAC DMA is done before this point
		 * in the SW. To prevent the case that we handle a stale Rx
		 * descriptor, just assert for now until we have a way to
		 * recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/* Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */
	return msdu_chaining;
}

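/* Look up, unmap and return the RX buffer that firmware addressed by its DMA
 * address (in-order RX). The caller takes ownership of the skb.
 */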
385static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
386 u32 paddr)
387{
388 struct ath10k *ar = htt->ar;
389 struct ath10k_skb_rxcb *rxcb;
390 struct sk_buff *msdu;
391
392 lockdep_assert_held(&htt->rx_ring.lock);
393
394 msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
395 if (!msdu)
396 return NULL;
397
398 rxcb = ATH10K_SKB_RXCB(msdu);
399 hash_del(&rxcb->hlist);
400 htt->rx_ring.fill_cnt--;
401
402 dma_unmap_single(htt->ar->dev, rxcb->paddr,
403 msdu->len + skb_tailroom(msdu),
404 DMA_FROM_DEVICE);
405 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
406 msdu->data, msdu->len + skb_tailroom(msdu));
407
408 return msdu;
409}
410
411static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
412 struct htt_rx_in_ord_ind *ev,
413 struct sk_buff_head *list)
414{
415 struct ath10k *ar = htt->ar;
416 struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
417 struct htt_rx_desc *rxd;
418 struct sk_buff *msdu;
419 int msdu_count;
420 bool is_offload;
421 u32 paddr;
422
423 lockdep_assert_held(&htt->rx_ring.lock);
424
425 msdu_count = __le16_to_cpu(ev->msdu_count);
426 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
427
428 while (msdu_count--) {
429 paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
430
431 msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
432 if (!msdu) {
433 __skb_queue_purge(list);
434 return -ENOENT;
435 }
436
437 __skb_queue_tail(list, msdu);
438
439 if (!is_offload) {
440 rxd = (void *)msdu->data;
441
442 trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
443
444 skb_put(msdu, sizeof(*rxd));
445 skb_pull(msdu, sizeof(*rxd));
446 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
447
448 if (!(__le32_to_cpu(rxd->attention.flags) &
449 RX_ATTENTION_FLAGS_MSDU_DONE)) {
450 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
451 return -EIO;
452 }
453 }
454
455 msdu_desc++;
456 }
457
458 return 0;
459}
460
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

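/* Length of the per-frame crypto header (IV/PN) that the given cipher
 * prepends to the payload; 0 for open or unhandled encryption types.
 */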
539static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
540 enum htt_rx_mpdu_encrypt_type type)
541{
542 switch (type) {
543 case HTT_RX_MPDU_ENCRYPT_NONE:
544 return 0;
545 case HTT_RX_MPDU_ENCRYPT_WEP40:
546 case HTT_RX_MPDU_ENCRYPT_WEP104:
547 return IEEE80211_WEP_IV_LEN;
548 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
549 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
550 return IEEE80211_TKIP_IV_LEN;
551 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
552 return IEEE80211_CCMP_HDR_LEN;
553 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
554 return IEEE80211_CCMP_256_HDR_LEN;
555 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
556 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
557 return IEEE80211_GCMP_HDR_LEN;
558 case HTT_RX_MPDU_ENCRYPT_WEP128:
559 case HTT_RX_MPDU_ENCRYPT_WAPI:
560 break;
561 }
562
563 ath10k_warn(ar, "unsupported encryption type %d\n", type);
564 return 0;
565}
566
567#define MICHAEL_MIC_LEN 8
568
569static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
570 enum htt_rx_mpdu_encrypt_type type)
571{
572 switch (type) {
573 case HTT_RX_MPDU_ENCRYPT_NONE:
574 return 0;
575 case HTT_RX_MPDU_ENCRYPT_WEP40:
576 case HTT_RX_MPDU_ENCRYPT_WEP104:
577 return IEEE80211_WEP_ICV_LEN;
578 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
579 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
580 return IEEE80211_TKIP_ICV_LEN;
581 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
582 return IEEE80211_CCMP_MIC_LEN;
583 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
584 return IEEE80211_CCMP_256_MIC_LEN;
585 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
586 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
587 return IEEE80211_GCMP_MIC_LEN;
588 case HTT_RX_MPDU_ENCRYPT_WEP128:
589 case HTT_RX_MPDU_ENCRYPT_WAPI:
590 break;
591 }
592
593 ath10k_warn(ar, "unsupported encryption type %d\n", type);
594 return 0;
595}
596
597struct amsdu_subframe_hdr {
598 u8 dst[ETH_ALEN];
599 u8 src[ETH_ALEN];
600 __be16 len;
601} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get the legacy rate index the band is required. It is
		 * not always available, but mac80211 needs it for legacy
		 * rates, so bail out when no channel is known.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG fields are carried in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into the Rx
			 * descriptor so the MU-MIMO MCS cannot be recovered.
			 * Firmware also consumes Group Id Management frames,
			 * so the host has no group/user position mapping and
			 * cannot pick the correct Nsts from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rate info
			 * on a best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->bw = RATE_INFO_BW_40;
			break;
		/* 80MHZ */
		case 2:
			status->bw = RATE_INFO_BW_80;
			break;
		/* 160MHZ */
		case 3:
			status->bw = RATE_INFO_BW_160;
			break;
		}

		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

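/* Try to derive the operating channel from the peer/vdev that sent the
 * frame. Used as a fallback when neither the scan nor rx channel is known.
 */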
731static struct ieee80211_channel *
732ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
733{
734 struct ath10k_peer *peer;
735 struct ath10k_vif *arvif;
736 struct cfg80211_chan_def def;
737 u16 peer_id;
738
739 lockdep_assert_held(&ar->data_lock);
740
741 if (!rxd)
742 return NULL;
743
744 if (rxd->attention.flags &
745 __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
746 return NULL;
747
748 if (!(rxd->msdu_end.common.info0 &
749 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
750 return NULL;
751
752 peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
753 RX_MPDU_START_INFO0_PEER_IDX);
754
755 peer = ath10k_peer_find_by_id(ar, peer_id);
756 if (!peer)
757 return NULL;
758
759 arvif = ath10k_get_arvif(ar, peer->vdev_id);
760 if (WARN_ON_ONCE(!arvif))
761 return NULL;
762
763 if (ath10k_mac_vif_chan(arvif->vif, &def))
764 return NULL;
765
766 return def.chan;
767}
768
769static struct ieee80211_channel *
770ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
771{
772 struct ath10k_vif *arvif;
773 struct cfg80211_chan_def def;
774
775 lockdep_assert_held(&ar->data_lock);
776
777 list_for_each_entry(arvif, &ar->arvifs, list) {
778 if (arvif->vdev_id == vdev_id &&
779 ath10k_mac_vif_chan(arvif->vif, &def) == 0)
780 return def.chan;
781 }
782
783 return NULL;
784}
785
786static void
787ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
788 struct ieee80211_chanctx_conf *conf,
789 void *data)
790{
791 struct cfg80211_chan_def *def = data;
792
793 *def = conf->def;
794}
795
796static struct ieee80211_channel *
797ath10k_htt_rx_h_any_channel(struct ath10k *ar)
798{
799 struct cfg80211_chan_def def = {};
800
801 ieee80211_iter_chan_contexts_atomic(ar->hw,
802 ath10k_htt_rx_h_any_chan_iter,
803 &def);
804
805 return def.chan;
806}
807
808static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
809 struct ieee80211_rx_status *status,
810 struct htt_rx_desc *rxd,
811 u32 vdev_id)
812{
813 struct ieee80211_channel *ch;
814
815 spin_lock_bh(&ar->data_lock);
816 ch = ar->scan_channel;
817 if (!ch)
818 ch = ar->rx_channel;
819 if (!ch)
820 ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
821 if (!ch)
822 ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
823 if (!ch)
824 ch = ath10k_htt_rx_h_any_channel(ar);
825 if (!ch)
826 ch = ar->tgt_oper_chan;
827 spin_unlock_bh(&ar->data_lock);
828
829 if (!ch)
830 return false;
831
832 status->band = ch->band;
833 status->freq = ch->center_freq;
834
835 return true;
836}
837
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
			    rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without
	 * TSF. Is that even ok to report frames like that to mac80211?
	 */

	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

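/* Populate PPDU-level rx status (channel, signal, rates, A-MPDU flags and
 * timestamp) from the rx descriptor of the A-MSDU's first buffer.
 */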
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts here */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}
924
925static const char * const tid_to_ac[] = {
926 "BE",
927 "BK",
928 "BK",
929 "BE",
930 "VI",
931 "VI",
932 "VO",
933 "VO",
934};
935
936static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
937{
938 u8 *qc;
939 int tid;
940
941 if (!ieee80211_is_data_qos(hdr->frame_control))
942 return "";
943
944 qc = ieee80211_get_qos_ctl(hdr);
945 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
946 if (tid < 8)
947 snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
948 else
949 snprintf(out, size, "tid %d", tid);
950
951 return out;
952}
953
954static void ath10k_process_rx(struct ath10k *ar,
955 struct ieee80211_rx_status *rx_status,
956 struct sk_buff *skb)
957{
958 struct ieee80211_rx_status *status;
959 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
960 char tid[32];
961
962 status = IEEE80211_SKB_RXCB(skb);
963 *status = *rx_status;
964
965 ath10k_dbg(ar, ATH10K_DBG_DATA,
966 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
967 skb,
968 skb->len,
969 ieee80211_get_SA(hdr),
970 ath10k_get_tid(hdr, tid, sizeof(tid)),
971 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
972 "mcast" : "ucast",
973 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
974 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
975 (status->encoding == RX_ENC_HT) ? "ht" : "",
976 (status->encoding == RX_ENC_VHT) ? "vht" : "",
977 (status->bw == RATE_INFO_BW_40) ? "40" : "",
978 (status->bw == RATE_INFO_BW_80) ? "80" : "",
979 (status->bw == RATE_INFO_BW_160) ? "160" : "",
980 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
981 status->rate_idx,
982 status->nss,
983 status->freq,
984 status->band, status->flag,
985 !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
986 !!(status->flag & RX_FLAG_MMIC_ERROR),
987 !!(status->flag & RX_FLAG_AMSDU_MORE));
988 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
989 skb->data, skb->len);
990 trace_ath10k_rx_hdr(ar, skb->data, skb->len);
991 trace_ath10k_rx_payload(ar, skb->data, skb->len);
992
993 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
994}
995
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

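/* Undecap a frame delivered in raw 802.11 format: trim the FCS and, if
 * hardware already decrypted it, strip the crypto header and trailer.
 */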
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be optional
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* If hardware did not decrypt the frame (or it came in unencrypted,
	 * e.g. while sniffing) deliver it as-is with crypto params intact so
	 * that software based decryption still has everything it needs.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params (frame control and
	 * QoS control fields are already undecapped by mac80211).
	 */
	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_tail_len(ar, enctype));
	} else {
		/* MIC */
		if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			skb_trim(msdu, msdu->len - 8);

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED &&
		    enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_tail_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

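/* Undecap a native-wifi frame: replace the stripped-down nwifi header with
 * the original 802.11 header saved from the A-MSDU's first MSDU.
 */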
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
			ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
1155
1156static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
1157 struct sk_buff *msdu,
1158 enum htt_rx_mpdu_encrypt_type enctype)
1159{
1160 struct ieee80211_hdr *hdr;
1161 struct htt_rx_desc *rxd;
1162 size_t hdr_len, crypto_len;
1163 void *rfc1042;
1164 bool is_first, is_last, is_amsdu;
1165 int bytes_aligned = ar->hw_params.decap_align_bytes;
1166
1167 rxd = (void *)msdu->data - sizeof(*rxd);
1168 hdr = (void *)rxd->rx_hdr_status;
1169
1170 is_first = !!(rxd->msdu_end.common.info0 &
1171 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1172 is_last = !!(rxd->msdu_end.common.info0 &
1173 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1174 is_amsdu = !(is_first && is_last);
1175
1176 rfc1042 = hdr;
1177
1178 if (is_first) {
1179 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1180 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1181
1182 rfc1042 += round_up(hdr_len, bytes_aligned) +
1183 round_up(crypto_len, bytes_aligned);
1184 }
1185
1186 if (is_amsdu)
1187 rfc1042 += sizeof(struct amsdu_subframe_hdr);
1188
1189 return rfc1042;
1190}
1191
1192static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1193 struct sk_buff *msdu,
1194 struct ieee80211_rx_status *status,
1195 const u8 first_hdr[64],
1196 enum htt_rx_mpdu_encrypt_type enctype)
1197{
1198 struct ieee80211_hdr *hdr;
1199 struct ethhdr *eth;
1200 size_t hdr_len;
1201 void *rfc1042;
1202 u8 da[ETH_ALEN];
1203 u8 sa[ETH_ALEN];
1204 int l3_pad_bytes;
1205 struct htt_rx_desc *rxd;
1206 int bytes_aligned = ar->hw_params.decap_align_bytes;
1207
1208
1209
1210
1211
1212
1213 rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
1214 if (WARN_ON_ONCE(!rfc1042))
1215 return;
1216
1217 rxd = (void *)msdu->data - sizeof(*rxd);
1218 l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1219 skb_put(msdu, l3_pad_bytes);
1220 skb_pull(msdu, l3_pad_bytes);
1221
1222
1223 eth = (struct ethhdr *)msdu->data;
1224 ether_addr_copy(da, eth->h_dest);
1225 ether_addr_copy(sa, eth->h_source);
1226 skb_pull(msdu, sizeof(struct ethhdr));
1227
1228
1229 memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
1230 sizeof(struct rfc1042_hdr));
1231
1232
1233 hdr = (struct ieee80211_hdr *)first_hdr;
1234 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1235
1236 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1237 memcpy(skb_push(msdu,
1238 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1239 (void *)hdr + round_up(hdr_len, bytes_aligned),
1240 ath10k_htt_rx_crypto_param_len(ar, enctype));
1241 }
1242
1243 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1244
1245
1246
1247
1248 hdr = (struct ieee80211_hdr *)msdu->data;
1249 ether_addr_copy(ieee80211_get_DA(hdr), da);
1250 ether_addr_copy(ieee80211_get_SA(hdr), sa);
1251}
1252
1253static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1254 struct sk_buff *msdu,
1255 struct ieee80211_rx_status *status,
1256 const u8 first_hdr[64],
1257 enum htt_rx_mpdu_encrypt_type enctype)
1258{
1259 struct ieee80211_hdr *hdr;
1260 size_t hdr_len;
1261 int l3_pad_bytes;
1262 struct htt_rx_desc *rxd;
1263 int bytes_aligned = ar->hw_params.decap_align_bytes;
1264
1265
1266
1267
1268
1269
1270
1271 rxd = (void *)msdu->data - sizeof(*rxd);
1272 l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1273
1274 skb_put(msdu, l3_pad_bytes);
1275 skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
1276
1277 hdr = (struct ieee80211_hdr *)first_hdr;
1278 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1279
1280 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1281 memcpy(skb_push(msdu,
1282 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1283 (void *)hdr + round_up(hdr_len, bytes_aligned),
1284 ath10k_htt_rx_crypto_param_len(ar, enctype));
1285 }
1286
1287 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1288}
1289
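/* Dispatch to the right undecap handler based on the decap format recorded
 * in the rx descriptor (raw / native-wifi / ethernet / 802.3+SNAP).
 */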
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */
	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}
1333
1334static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
1335{
1336 struct htt_rx_desc *rxd;
1337 u32 flags, info;
1338 bool is_ip4, is_ip6;
1339 bool is_tcp, is_udp;
1340 bool ip_csum_ok, tcpudp_csum_ok;
1341
1342 rxd = (void *)skb->data - sizeof(*rxd);
1343 flags = __le32_to_cpu(rxd->attention.flags);
1344 info = __le32_to_cpu(rxd->msdu_start.common.info1);
1345
1346 is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1347 is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1348 is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1349 is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1350 ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1351 tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1352
1353 if (!is_ip4 && !is_ip6)
1354 return CHECKSUM_NONE;
1355 if (!is_tcp && !is_udp)
1356 return CHECKSUM_NONE;
1357 if (!ip_csum_ok)
1358 return CHECKSUM_NONE;
1359 if (!tcpudp_csum_ok)
1360 return CHECKSUM_NONE;
1361
1362 return CHECKSUM_UNNECESSARY;
1363}
1364
1365static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
1366{
1367 msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
1368}
1369
1370static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1371 struct sk_buff_head *amsdu,
1372 struct ieee80211_rx_status *status,
1373 bool fill_crypt_header)
1374{
1375 struct sk_buff *first;
1376 struct sk_buff *last;
1377 struct sk_buff *msdu;
1378 struct htt_rx_desc *rxd;
1379 struct ieee80211_hdr *hdr;
1380 enum htt_rx_mpdu_encrypt_type enctype;
1381 u8 first_hdr[64];
1382 u8 *qos;
1383 bool has_fcs_err;
1384 bool has_crypto_err;
1385 bool has_tkip_err;
1386 bool has_peer_idx_invalid;
1387 bool is_decrypted;
1388 bool is_mgmt;
1389 u32 attention;
1390
1391 if (skb_queue_empty(amsdu))
1392 return;
1393
	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will be reported as a separate MSDU so strip
	 * the A-MSDU bit from QoS Ctl of the base header.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}
1494
1495static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1496 struct sk_buff_head *amsdu,
1497 struct ieee80211_rx_status *status)
1498{
1499 struct sk_buff *msdu;
1500 struct sk_buff *first_subframe;
1501
1502 first_subframe = skb_peek(amsdu);
1503
1504 while ((msdu = __skb_dequeue(amsdu))) {
1505
1506 if (skb_queue_empty(amsdu))
1507 status->flag &= ~RX_FLAG_AMSDU_MORE;
1508 else
1509 status->flag |= RX_FLAG_AMSDU_MORE;
1510
1511 if (msdu == first_subframe) {
1512 first_subframe = NULL;
1513 status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
1514 } else {
1515 status->flag |= RX_FLAG_ALLOW_SAME_PN;
1516 }
1517
1518 ath10k_process_rx(ar, status, msdu);
1519 }
1520}
1521
1522static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
1523{
1524 struct sk_buff *skb, *first;
1525 int space;
1526 int total_len = 0;
1527
1528
1529
1530
1531
1532
1533
1534
1535 first = __skb_dequeue(amsdu);
1536
1537
1538 skb_queue_walk(amsdu, skb)
1539 total_len += skb->len;
1540
1541 space = total_len - skb_tailroom(first);
1542 if ((space > 0) &&
1543 (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
1544
1545
1546
1547
1548 __skb_queue_head(amsdu, first);
1549 return -1;
1550 }
1551
1552
1553
1554
1555 while ((skb = __skb_dequeue(amsdu))) {
1556 skb_copy_from_linear_data(skb, skb_put(first, skb->len),
1557 skb->len);
1558 dev_kfree_skb_any(skb);
1559 }
1560
1561 __skb_queue_head(amsdu, first);
1562 return 0;
1563}
1564
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* Chained rx buffers are only reconstructed for the simple raw-decap
	 * case. Anything else, or an inconsistent chain length, cannot be
	 * reassembled reliably so such frames are dropped.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}

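/* Decide whether the A-MSDU may be passed up to mac80211: drop frames when
 * no channel is known or while CAC is running.
 */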
1591static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1592 struct sk_buff_head *amsdu,
1593 struct ieee80211_rx_status *rx_status)
1594{
1595
1596
1597
1598
1599 if (!rx_status->freq) {
1600 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
1601 return false;
1602 }
1603
1604 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1605 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
1606 return false;
1607 }
1608
1609 return true;
1610}
1611
1612static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1613 struct sk_buff_head *amsdu,
1614 struct ieee80211_rx_status *rx_status)
1615{
1616 if (skb_queue_empty(amsdu))
1617 return;
1618
1619 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
1620 return;
1621
1622 __skb_queue_purge(amsdu);
1623}
1624
1625static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1626{
1627 struct ath10k *ar = htt->ar;
1628 struct ieee80211_rx_status *rx_status = &htt->rx_status;
1629 struct sk_buff_head amsdu;
1630 int ret, num_msdus;
1631
1632 __skb_queue_head_init(&amsdu);
1633
1634 spin_lock_bh(&htt->rx_ring.lock);
1635 if (htt->rx_confused) {
1636 spin_unlock_bh(&htt->rx_ring.lock);
1637 return -EIO;
1638 }
1639 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
1640 spin_unlock_bh(&htt->rx_ring.lock);
1641
1642 if (ret < 0) {
1643 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1644 __skb_queue_purge(&amsdu);
1645
1646
1647
1648 htt->rx_confused = true;
1649 return ret;
1650 }
1651
1652 num_msdus = skb_queue_len(&amsdu);
1653 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1654
1655
1656 if (ret > 0)
1657 ath10k_htt_rx_h_unchain(ar, &amsdu);
1658
1659 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1660 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
1661 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1662
1663 return num_msdus;
1664}
1665
1666static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
1667 struct htt_rx_indication *rx)
1668{
1669 struct ath10k *ar = htt->ar;
1670 struct htt_rx_indication_mpdu_range *mpdu_ranges;
1671 int num_mpdu_ranges;
1672 int i, mpdu_count = 0;
1673
1674 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1675 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1676 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1677
1678 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
1679 rx, sizeof(*rx) +
1680 (sizeof(struct htt_rx_indication_mpdu_range) *
1681 num_mpdu_ranges));
1682
1683 for (i = 0; i < num_mpdu_ranges; i++)
1684 mpdu_count += mpdu_ranges[i].mpdu_count;
1685
1686 atomic_add(mpdu_count, &htt->num_mpdus_ready);
1687}
1688
1689static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
1690 struct sk_buff *skb)
1691{
1692 struct ath10k_htt *htt = &ar->htt;
1693 struct htt_resp *resp = (struct htt_resp *)skb->data;
1694 struct htt_tx_done tx_done = {};
1695 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1696 __le16 msdu_id;
1697 int i;
1698
1699 switch (status) {
1700 case HTT_DATA_TX_STATUS_NO_ACK:
1701 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
1702 break;
1703 case HTT_DATA_TX_STATUS_OK:
1704 tx_done.status = HTT_TX_COMPL_STATE_ACK;
1705 break;
1706 case HTT_DATA_TX_STATUS_DISCARD:
1707 case HTT_DATA_TX_STATUS_POSTPONE:
1708 case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1709 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1710 break;
1711 default:
1712 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
1713 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1714 break;
1715 }
1716
1717 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1718 resp->data_tx_completion.num_msdus);
1719
	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the
		 * same HTC service so it should be safe to use kfifo_put
		 * w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one
		 *  concurrent writer, you don't need extra locking.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}

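/* Handle an HTT RX_ADDBA event: look up the peer and start an offloaded RX
 * block-ack session for the reported TID.
 */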
1741static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1742{
1743 struct htt_rx_addba *ev = &resp->rx_addba;
1744 struct ath10k_peer *peer;
1745 struct ath10k_vif *arvif;
1746 u16 info0, tid, peer_id;
1747
1748 info0 = __le16_to_cpu(ev->info0);
1749 tid = MS(info0, HTT_RX_BA_INFO0_TID);
1750 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1751
1752 ath10k_dbg(ar, ATH10K_DBG_HTT,
1753 "htt rx addba tid %hu peer_id %hu size %hhu\n",
1754 tid, peer_id, ev->window_size);
1755
1756 spin_lock_bh(&ar->data_lock);
1757 peer = ath10k_peer_find_by_id(ar, peer_id);
1758 if (!peer) {
1759 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1760 peer_id);
1761 spin_unlock_bh(&ar->data_lock);
1762 return;
1763 }
1764
1765 arvif = ath10k_get_arvif(ar, peer->vdev_id);
1766 if (!arvif) {
1767 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1768 peer->vdev_id);
1769 spin_unlock_bh(&ar->data_lock);
1770 return;
1771 }
1772
1773 ath10k_dbg(ar, ATH10K_DBG_HTT,
1774 "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1775 peer->addr, tid, ev->window_size);
1776
1777 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1778 spin_unlock_bh(&ar->data_lock);
1779}
1780
1781static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1782{
1783 struct htt_rx_delba *ev = &resp->rx_delba;
1784 struct ath10k_peer *peer;
1785 struct ath10k_vif *arvif;
1786 u16 info0, tid, peer_id;
1787
1788 info0 = __le16_to_cpu(ev->info0);
1789 tid = MS(info0, HTT_RX_BA_INFO0_TID);
1790 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1791
1792 ath10k_dbg(ar, ATH10K_DBG_HTT,
1793 "htt rx delba tid %hu peer_id %hu\n",
1794 tid, peer_id);
1795
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}
1812
1813 ath10k_dbg(ar, ATH10K_DBG_HTT,
1814 "htt rx stop rx ba session sta %pM tid %hu\n",
1815 peer->addr, tid);
1816
1817 ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1818 spin_unlock_bh(&ar->data_lock);
1819}
1820
1821static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1822 struct sk_buff_head *amsdu)
1823{
1824 struct sk_buff *msdu;
1825 struct htt_rx_desc *rxd;
1826
1827 if (skb_queue_empty(list))
1828 return -ENOBUFS;
1829
1830 if (WARN_ON(!skb_queue_empty(amsdu)))
1831 return -EINVAL;
1832
1833 while ((msdu = __skb_dequeue(list))) {
1834 __skb_queue_tail(amsdu, msdu);
1835
1836 rxd = (void *)msdu->data - sizeof(*rxd);
1837 if (rxd->msdu_end.common.info0 &
1838 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
1839 break;
1840 }
1841
1842 msdu = skb_peek_tail(amsdu);
1843 rxd = (void *)msdu->data - sizeof(*rxd);
1844 if (!(rxd->msdu_end.common.info0 &
1845 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
1846 skb_queue_splice_init(amsdu, list);
1847 return -EAGAIN;
1848 }
1849
1850 return 0;
1851}
1852
1853static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
1854 struct sk_buff *skb)
1855{
1856 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1857
1858 if (!ieee80211_has_protected(hdr->frame_control))
1859 return;
1860
1861
1862
1863
1864
1865
1866 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1867 status->flag |= RX_FLAG_DECRYPTED |
1868 RX_FLAG_IV_STRIPPED |
1869 RX_FLAG_MMIC_STRIPPED;
1870}
1871
1872static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
1873 struct sk_buff_head *list)
1874{
1875 struct ath10k_htt *htt = &ar->htt;
1876 struct ieee80211_rx_status *status = &htt->rx_status;
1877 struct htt_rx_offload_msdu *rx;
1878 struct sk_buff *msdu;
1879 size_t offset;
1880 int num_msdu = 0;
1881
1882 while ((msdu = __skb_dequeue(list))) {
1883
1884
1885
1886
1887 rx = (void *)msdu->data;
1888
1889 skb_put(msdu, sizeof(*rx));
1890 skb_pull(msdu, sizeof(*rx));
1891
1892 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
1893 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
1894 dev_kfree_skb_any(msdu);
1895 continue;
1896 }
1897
1898 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
1899
1900
1901
1902
1903
1904
1905 offset = 4 - ((unsigned long)msdu->data & 3);
1906 skb_put(msdu, offset);
1907 memmove(msdu->data + offset, msdu->data, msdu->len);
1908 skb_pull(msdu, offset);
1909
1910
1911
1912
1913
1914 memset(status, 0, sizeof(*status));
1915 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1916
1917 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
1918 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
1919 ath10k_process_rx(ar, status, msdu);
1920 num_msdu++;
1921 }
1922 return num_msdu;
1923}
1924
1925static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1926{
1927 struct ath10k_htt *htt = &ar->htt;
1928 struct htt_resp *resp = (void *)skb->data;
1929 struct ieee80211_rx_status *status = &htt->rx_status;
1930 struct sk_buff_head list;
1931 struct sk_buff_head amsdu;
1932 u16 peer_id;
1933 u16 msdu_count;
1934 u8 vdev_id;
1935 u8 tid;
1936 bool offload;
1937 bool frag;
1938 int ret, num_msdus = 0;
1939
1940 lockdep_assert_held(&htt->rx_ring.lock);
1941
1942 if (htt->rx_confused)
1943 return -EIO;
1944
1945 skb_pull(skb, sizeof(resp->hdr));
1946 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
1947
1948 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
1949 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
1950 vdev_id = resp->rx_in_ord_ind.vdev_id;
1951 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
1952 offload = !!(resp->rx_in_ord_ind.info &
1953 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
1954 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
1955
1956 ath10k_dbg(ar, ATH10K_DBG_HTT,
1957 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
1958 vdev_id, peer_id, tid, offload, frag, msdu_count);
1959
1960 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
1961 ath10k_warn(ar, "dropping invalid in order rx indication\n");
1962 return -EINVAL;
1963 }
1964
	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			num_msdus += skb_queue_len(&amsdu);
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}
	return num_msdus;
}
2011
2012static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
2013 const __le32 *resp_ids,
2014 int num_resp_ids)
2015{
2016 int i;
2017 u32 resp_id;
2018
2019 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
2020 num_resp_ids);
2021
2022 for (i = 0; i < num_resp_ids; i++) {
2023 resp_id = le32_to_cpu(resp_ids[i]);
2024
2025 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
2026 resp_id);
2027
2028
2029 }
2030}
2031
2032static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
2033{
2034 struct ieee80211_hw *hw = ar->hw;
2035 struct ieee80211_txq *txq;
2036 struct htt_resp *resp = (struct htt_resp *)skb->data;
2037 struct htt_tx_fetch_record *record;
2038 size_t len;
2039 size_t max_num_bytes;
2040 size_t max_num_msdus;
2041 size_t num_bytes;
2042 size_t num_msdus;
2043 const __le32 *resp_ids;
2044 u16 num_records;
2045 u16 num_resp_ids;
2046 u16 peer_id;
2047 u8 tid;
2048 int ret;
2049 int i;
2050
2051 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
2052
2053 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
2054 if (unlikely(skb->len < len)) {
2055 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
2056 return;
2057 }
2058
2059 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
2060 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
2061
2062 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
2063 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
2064
2065 if (unlikely(skb->len < len)) {
2066 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
2067 return;
2068 }
2069
2070 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
2071 num_records, num_resp_ids,
2072 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
2073
2074 if (!ar->htt.tx_q_state.enabled) {
2075 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
2076 return;
2077 }
2078
2079 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
2080 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
2081 return;
2082 }
2083
2084 rcu_read_lock();
2085
2086 for (i = 0; i < num_records; i++) {
2087 record = &resp->tx_fetch_ind.records[i];
2088 peer_id = MS(le16_to_cpu(record->info),
2089 HTT_TX_FETCH_RECORD_INFO_PEER_ID);
2090 tid = MS(le16_to_cpu(record->info),
2091 HTT_TX_FETCH_RECORD_INFO_TID);
2092 max_num_msdus = le16_to_cpu(record->num_msdus);
2093 max_num_bytes = le32_to_cpu(record->num_bytes);
2094
2095 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
2096 i, peer_id, tid, max_num_msdus, max_num_bytes);
2097
2098 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2099 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2100 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2101 peer_id, tid);
2102 continue;
2103 }
2104
2105 spin_lock_bh(&ar->data_lock);
2106 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2107 spin_unlock_bh(&ar->data_lock);
2108
2109
2110
2111
2112
2113 if (unlikely(!txq)) {
2114 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2115 peer_id, tid);
2116 continue;
2117 }
2118
2119 num_msdus = 0;
2120 num_bytes = 0;
2121
2122 while (num_msdus < max_num_msdus &&
2123 num_bytes < max_num_bytes) {
2124 ret = ath10k_mac_tx_push_txq(hw, txq);
2125 if (ret < 0)
2126 break;
2127
2128 num_msdus++;
2129 num_bytes += ret;
2130 }
2131
2132 record->num_msdus = cpu_to_le16(num_msdus);
2133 record->num_bytes = cpu_to_le32(num_bytes);
2134
2135 ath10k_htt_tx_txq_recalc(hw, txq);
2136 }
2137
2138 rcu_read_unlock();
2139
2140 resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2141 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2142
2143 ret = ath10k_htt_tx_fetch_resp(ar,
2144 resp->tx_fetch_ind.token,
2145 resp->tx_fetch_ind.fetch_seq_num,
2146 resp->tx_fetch_ind.records,
2147 num_records);
2148 if (unlikely(ret)) {
2149 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2150 le32_to_cpu(resp->tx_fetch_ind.token), ret);
2151
2152 }
2153
2154 ath10k_htt_tx_txq_sync(ar);
2155}
2156
2157static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2158 struct sk_buff *skb)
2159{
2160 const struct htt_resp *resp = (void *)skb->data;
2161 size_t len;
2162 int num_resp_ids;
2163
2164 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2165
2166 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2167 if (unlikely(skb->len < len)) {
2168 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2169 return;
2170 }
2171
2172 num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2173 len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2174
2175 if (unlikely(skb->len < len)) {
2176 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2177 return;
2178 }
2179
2180 ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2181 resp->tx_fetch_confirm.resp_ids,
2182 num_resp_ids);
2183}
2184
2185static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2186 struct sk_buff *skb)
2187{
2188 const struct htt_resp *resp = (void *)skb->data;
2189 const struct htt_tx_mode_switch_record *record;
2190 struct ieee80211_txq *txq;
2191 struct ath10k_txq *artxq;
2192 size_t len;
2193 size_t num_records;
2194 enum htt_tx_mode_switch_mode mode;
2195 bool enable;
2196 u16 info0;
2197 u16 info1;
2198 u16 threshold;
2199 u16 peer_id;
2200 u8 tid;
2201 int i;
2202
2203 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2204
2205 len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2206 if (unlikely(skb->len < len)) {
2207 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2208 return;
2209 }
2210
	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2218
2219 ath10k_dbg(ar, ATH10K_DBG_HTT,
2220 "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2221 info0, info1, enable, num_records, mode, threshold);
2222
2223 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2224
2225 if (unlikely(skb->len < len)) {
2226 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
2227 return;
2228 }
2229
2230 switch (mode) {
2231 case HTT_TX_MODE_SWITCH_PUSH:
2232 case HTT_TX_MODE_SWITCH_PUSH_PULL:
2233 break;
2234 default:
2235 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
2236 mode);
2237 return;
2238 }
2239
2240 if (!enable)
2241 return;
2242
2243 ar->htt.tx_q_state.enabled = enable;
2244 ar->htt.tx_q_state.mode = mode;
2245 ar->htt.tx_q_state.num_push_allowed = threshold;
2246
2247 rcu_read_lock();
2248
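/* Apply the per-(peer, tid) push limit carried in each record to the
 * corresponding txq.
 */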
2249 for (i = 0; i < num_records; i++) {
2250 record = &resp->tx_mode_switch_ind.records[i];
2251 info0 = le16_to_cpu(record->info0);
2252 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2253 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2254
2255 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2256 unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2257 ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2258 peer_id, tid);
2259 continue;
2260 }
2261
2262 spin_lock_bh(&ar->data_lock);
2263 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2264 spin_unlock_bh(&ar->data_lock);
2265
2266 /* It is okay to release the lock and use txq because RCU read
2267 * lock is held.
2268 */
2269
2270 if (unlikely(!txq)) {
2271 ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2272 peer_id, tid);
2273 continue;
2274 }
2275
2276 spin_lock_bh(&ar->htt.tx_lock);
2277 artxq = (void *)txq->drv_priv;
2278 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2279 spin_unlock_bh(&ar->htt.tx_lock);
2280 }
2281
2282 rcu_read_unlock();
2283
2284 ath10k_mac_tx_push_pending(ar);
2285}
2286
2287void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2288{
2289 bool release;
2290
2291 release = ath10k_htt_t2h_msg_handler(ar, skb);
2292
2293 /* Free the indication buffer */
2294 if (release)
2295 dev_kfree_skb_any(skb);
2296}
2297
2298static inline bool is_valid_legacy_rate(u8 rate)
2299{
2300 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
2301 18, 24, 36, 48, 54};
2302 int i;
2303
2304 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
2305 if (rate == legacy_rates[i])
2306 return true;
2307 }
2308
2309 return false;
2310}
2311
2312static void
2313ath10k_update_per_peer_tx_stats(struct ath10k *ar,
2314 struct ieee80211_sta *sta,
2315 struct ath10k_per_peer_tx_stats *peer_stats)
2316{
2317 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
2318 u8 rate = 0, sgi;
2319 struct rate_info txrate;
2320
2321 lockdep_assert_held(&ar->data_lock);
2322
2323 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
2324 txrate.bw = ATH10K_HW_BW(peer_stats->flags);
2325 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
2326 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
2327 sgi = ATH10K_HW_GI(peer_stats->flags);
2328
2329 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
2330 ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
2331 return;
2332 }
2333
2334 if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
2335 (txrate.mcs > 7 || txrate.nss < 1)) {
2336 ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
2337 txrate.mcs, txrate.nss);
2338 return;
2339 }
2340
2341 memset(&arsta->txrate, 0, sizeof(arsta->txrate));
2342
2343 if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
2344 txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
2345 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
2346
2347 if (!is_valid_legacy_rate(rate)) {
2348 ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
2349 rate);
2350 return;
2351 }
2352
2353 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
2354 rate *= 10;
2355 if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
2356 rate = rate - 5;
2357 arsta->txrate.legacy = rate;
2358 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
2359 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
2360 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
2361 } else {
2362 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
2363 arsta->txrate.mcs = txrate.mcs;
2364 }
2365
2366 if (sgi)
2367 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
2368
2369 arsta->txrate.nss = txrate.nss;
2370 arsta->txrate.bw = txrate.bw + RATE_INFO_BW_20;
2371}
2372
2373static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
2374 struct sk_buff *skb)
2375{
2376 struct htt_resp *resp = (struct htt_resp *)skb->data;
2377 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
2378 struct htt_per_peer_tx_stats_ind *tx_stats;
2379 struct ieee80211_sta *sta;
2380 struct ath10k_peer *peer;
2381 int peer_id, i;
2382 u8 ppdu_len, num_ppdu;
2383
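/* ppdu_len is reported by the firmware in 32-bit words, so convert it
 * to bytes before using it to walk the payload.
 */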
2384 num_ppdu = resp->peer_tx_stats.num_ppdu;
2385 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
2386
2387 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
2388 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
2389 return;
2390 }
2391
2392 tx_stats = (struct htt_per_peer_tx_stats_ind *)
2393 (resp->peer_tx_stats.payload);
2394 peer_id = __le16_to_cpu(tx_stats->peer_id);
2395
2396 rcu_read_lock();
2397 spin_lock_bh(&ar->data_lock);
2398 peer = ath10k_peer_find_by_id(ar, peer_id);
2399 if (!peer) {
2400 ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
2401 peer_id);
2402 goto out;
2403 }
2404
2405 sta = peer->sta;
2406 for (i = 0; i < num_ppdu; i++) {
2407 tx_stats = (struct htt_per_peer_tx_stats_ind *)
2408 (resp->peer_tx_stats.payload + i * ppdu_len);
2409
2410 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
2411 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
2412 p_tx_stats->failed_bytes =
2413 __le32_to_cpu(tx_stats->failed_bytes);
2414 p_tx_stats->ratecode = tx_stats->ratecode;
2415 p_tx_stats->flags = tx_stats->flags;
2416 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
2417 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
2418 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
2419
2420 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
2421 }
2422
2423out:
2424 spin_unlock_bh(&ar->data_lock);
2425 rcu_read_unlock();
2426}
2427
2428bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2429{
2430 struct ath10k_htt *htt = &ar->htt;
2431 struct htt_resp *resp = (struct htt_resp *)skb->data;
2432 enum htt_t2h_msg_type type;
2433
2434 /* confirm alignment */
2435 if (!IS_ALIGNED((unsigned long)skb->data, 4))
2436 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
2437
2438 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
2439 resp->hdr.msg_type);
2440
2441 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
2442 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
2443 resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2444 return true;
2445 }
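/* t2h_msg_types[] maps the firmware-specific message id onto the
 * abstract HTT_T2H_MSG_TYPE_* values handled by the switch below.
 */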
2446 type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2447
2448 switch (type) {
2449 case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2450 htt->target_version_major = resp->ver_resp.major;
2451 htt->target_version_minor = resp->ver_resp.minor;
2452 complete(&htt->target_version_received);
2453 break;
2454 }
2455 case HTT_T2H_MSG_TYPE_RX_IND:
2456 ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
2457 break;
2458 case HTT_T2H_MSG_TYPE_PEER_MAP: {
2459 struct htt_peer_map_event ev = {
2460 .vdev_id = resp->peer_map.vdev_id,
2461 .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2462 };
2463 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2464 ath10k_peer_map_event(htt, &ev);
2465 break;
2466 }
2467 case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2468 struct htt_peer_unmap_event ev = {
2469 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2470 };
2471 ath10k_peer_unmap_event(htt, &ev);
2472 break;
2473 }
2474 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2475 struct htt_tx_done tx_done = {};
2476 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2477
2478 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2479
2480 switch (status) {
2481 case HTT_MGMT_TX_STATUS_OK:
2482 tx_done.status = HTT_TX_COMPL_STATE_ACK;
2483 break;
2484 case HTT_MGMT_TX_STATUS_RETRY:
2485 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2486 break;
2487 case HTT_MGMT_TX_STATUS_DROP:
2488 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2489 break;
2490 }
2491
2492 status = ath10k_txrx_tx_unref(htt, &tx_done);
2493 if (!status) {
2494 spin_lock_bh(&htt->tx_lock);
2495 ath10k_htt_tx_mgmt_dec_pending(htt);
2496 spin_unlock_bh(&htt->tx_lock);
2497 }
2498 break;
2499 }
2500 case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
2501 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
2502 break;
2503 case HTT_T2H_MSG_TYPE_SEC_IND: {
2504 struct ath10k *ar = htt->ar;
2505 struct htt_security_indication *ev = &resp->security_indication;
2506
2507 ath10k_dbg(ar, ATH10K_DBG_HTT,
2508 "sec ind peer_id %d unicast %d type %d\n",
2509 __le16_to_cpu(ev->peer_id),
2510 !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2511 MS(ev->flags, HTT_SECURITY_TYPE));
2512 complete(&ar->install_key_done);
2513 break;
2514 }
2515 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
2516 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2517 skb->data, skb->len);
2518 atomic_inc(&htt->num_mpdus_ready);
2519 break;
2520 }
2521 case HTT_T2H_MSG_TYPE_TEST:
2522 break;
2523 case HTT_T2H_MSG_TYPE_STATS_CONF:
2524 trace_ath10k_htt_stats(ar, skb->data, skb->len);
2525 break;
2526 case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
2527 /* Firmware can return tx frames if it's unable to fully
2528 * process them and suspects host may be able to fix it. ath10k
2529 * sends all tx frames as already inspected so this shouldn't
2530 * happen unless fw has a bug.
2531 */
2532 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
2533 break;
2534 case HTT_T2H_MSG_TYPE_RX_ADDBA:
2535 ath10k_htt_rx_addba(ar, resp);
2536 break;
2537 case HTT_T2H_MSG_TYPE_RX_DELBA:
2538 ath10k_htt_rx_delba(ar, resp);
2539 break;
2540 case HTT_T2H_MSG_TYPE_PKTLOG: {
2541 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2542 skb->len -
2543 offsetof(struct htt_resp,
2544 pktlog_msg.payload));
2545 break;
2546 }
2547 case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2548 /* Ignore this event because mac80211 takes care of Rx
2549 * aggregation reordering.
2550 */
2551 break;
2552 }
2553 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2554 __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2555 return false;
2556 }
2557 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2558 break;
2559 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2560 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2561 u32 freq = __le32_to_cpu(resp->chan_change.freq);
2562
2563 ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
2564 ath10k_dbg(ar, ATH10K_DBG_HTT,
2565 "htt chan change freq %u phymode %s\n",
2566 freq, ath10k_wmi_phymode_str(phymode));
2567 break;
2568 }
2569 case HTT_T2H_MSG_TYPE_AGGR_CONF:
2570 break;
2571 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
2572 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
2573
2574 if (!tx_fetch_ind) {
2575 ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
2576 break;
2577 }
2578 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
2579 break;
2580 }
2581 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
2582 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2583 break;
2584 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
2585 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
2586 break;
2587 case HTT_T2H_MSG_TYPE_PEER_STATS:
2588 ath10k_htt_fetch_peer_stats(ar, skb);
2589 break;
2590 case HTT_T2H_MSG_TYPE_EN_STATS:
2591 default:
2592 ath10k_warn(ar, "htt event (%d) not handled\n",
2593 resp->hdr.msg_type);
2594 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2595 skb->data, skb->len);
2596 break;
2597 }
2598 return true;
2599}
2600EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
2601
2602void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2603 struct sk_buff *skb)
2604{
2605 trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
2606 dev_kfree_skb_any(skb);
2607}
2608EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2609
2610int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
2611{
2612 struct ath10k_htt *htt = &ar->htt;
2613 struct htt_tx_done tx_done = {};
2614 struct sk_buff_head tx_ind_q;
2615 struct sk_buff *skb;
2616 unsigned long flags;
2617 int quota = 0, done, num_rx_msdus;
2618 bool resched_napi = false;
2619
2620 __skb_queue_head_init(&tx_ind_q);
2621
2622 /* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
2623 * process it first to utilize full available quota.
2624 */
2625 while (quota < budget) {
2626 if (skb_queue_empty(&htt->rx_in_ord_compl_q))
2627 break;
2628
2629 skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
2630 if (!skb) {
2631 resched_napi = true;
2632 goto exit;
2633 }
2634
2635 spin_lock_bh(&htt->rx_ring.lock);
2636 num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
2637 spin_unlock_bh(&htt->rx_ring.lock);
2638 if (num_rx_msdus < 0) {
2639 resched_napi = true;
2640 goto exit;
2641 }
2642
2643 dev_kfree_skb_any(skb);
2644 if (num_rx_msdus > 0)
2645 quota += num_rx_msdus;
2646
2647 if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
2648 !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
2649 resched_napi = true;
2650 goto exit;
2651 }
2652 }
2653
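/* Next, service MPDUs announced via RX_IND/RX_FRAG_IND (counted in
 * num_mpdus_ready) until the budget is used up.
 */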
2654 while (quota < budget) {
2655
2656 if (!atomic_read(&htt->num_mpdus_ready))
2657 break;
2658
2659 num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
2660 if (num_rx_msdus < 0) {
2661 resched_napi = true;
2662 goto exit;
2663 }
2664
2665 quota += num_rx_msdus;
2666 atomic_dec(&htt->num_mpdus_ready);
2667 if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
2668 atomic_read(&htt->num_mpdus_ready)) {
2669 resched_napi = true;
2670 goto exit;
2671 }
2672 }
2673
2674 /* From NAPI documentation:
2675 * The napi poll() function may also process TX completions, in which
2676 * case if it processes the entire TX ring then it should count that
2677 * work as the rest of the budget.
2678 */
2679 if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
2680 quota = budget;
2681
2682 /* kfifo_get: called only from this NAPI poll context so it's neatly
2683 * serialized. From kfifo_get() documentation:
2684 * Note that with only one concurrent reader and one concurrent writer,
2685 * you don't need extra locking to use these macros.
2686 */
2687 while (kfifo_get(&htt->txdone_fifo, &tx_done))
2688 ath10k_txrx_tx_unref(htt, &tx_done);
2689
2690 ath10k_mac_tx_push_pending(ar);
2691
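/* Move any queued tx fetch indications to a local list under the queue
 * lock, then handle them without holding it.
 */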
2692 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
2693 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
2694 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
2695
2696 while ((skb = __skb_dequeue(&tx_ind_q))) {
2697 ath10k_htt_rx_tx_fetch_ind(ar, skb);
2698 dev_kfree_skb_any(skb);
2699 }
2700
2701exit:
2702 ath10k_htt_rx_msdu_buff_replenish(htt);
2703 /* In case of rx failure or more data to read, report budget
2704 * to reschedule NAPI poll
2705 */
2706 done = resched_napi ? budget : quota;
2707
2708 return done;
2709}
2710EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
2711