linux/drivers/net/wireless/ath/ath10k/htt_rx.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

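/* Look up the sk_buff backing a given device DMA address. Only used in
 * full rx reorder mode where posted buffers are tracked in a
 * paddr-keyed hash table rather than by ring index.
 */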
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
        struct ath10k_skb_rxcb *rxcb;

        hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
                if (rxcb->paddr == paddr)
                        return ATH10K_RXCB_SKB(rxcb);

        WARN_ON_ONCE(1);
        return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_rxcb *rxcb;
        struct hlist_node *n;
        int i;

        if (htt->rx_ring.in_ord_rx) {
                hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
                        skb = ATH10K_RXCB_SKB(rxcb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        hash_del(&rxcb->hlist);
                        dev_kfree_skb_any(skb);
                }
        } else {
                for (i = 0; i < htt->rx_ring.size; i++) {
                        skb = htt->rx_ring.netbufs_ring[i];
                        if (!skb)
                                continue;

                        rxcb = ATH10K_SKB_RXCB(skb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                }
        }

        htt->rx_ring.fill_cnt = 0;
        hash_init(htt->rx_ring.skb_table);
        memset(htt->rx_ring.netbufs_ring, 0,
               htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

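/* Post @num fresh rx buffers to the ring. The updated alloc index is
 * published to the device only once, at the end, after a memory
 * barrier, so a partially filled batch is safe. Callers must hold
 * rx_ring.lock (see the locked wrapper below).
 */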
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        /* The Full Rx Reorder firmware has no way of telling the host
         * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx
         * Ring. To keep things simple make sure the ring is never more
         * than half full. This guarantees there can be no replenishment
         * overruns.
         */
        BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

        idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                rxcb = ATH10K_SKB_RXCB(skb);
                rxcb->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
                htt->rx_ring.fill_cnt++;

                if (htt->rx_ring.in_ord_rx) {
                        hash_add(htt->rx_ring.skb_table,
                                 &ATH10K_SKB_RXCB(skb)->hlist,
                                 (u32)paddr);
                }

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        /*
         * Make sure the rx buffer is updated before available buffer
         * index to avoid any potential rx ring corruption.
         */
        mb();
        *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
        return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_deficit, num_to_fill;

        /* Refilling the whole RX ring buffer proves to be a bad idea. The
         * reason is RX may take up a significant amount of CPU cycles and
         * starve other tasks, e.g. TX on an ethernet device while acting
         * as a bridge with an ath10k wlan interface. This ended up with
         * very poor performance once the host system's CPU was overwhelmed
         * with RX on ath10k.
         *
         * By limiting the number of refills the replenishing occurs
         * progressively. This in turn makes use of the fact that tasklets
         * are processed in FIFO order. This means actual RX processing can
         * starve out refilling. If there aren't enough buffers in the RX
         * ring the FW will not report RX until it is refilled with enough
         * buffers. This automatically balances load with respect to CPU
         * power.
         *
         * This probably comes at the cost of lower maximum throughput but
         * improves the average and stability.
         */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
        num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)arg;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

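/* Prime the rx ring up to the configured fill level. If that fails the
 * ring is freed so the setup path can bail out cleanly.
 */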
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
        struct ath10k_htt *htt = &ar->htt;
        int ret;

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
                                              htt->rx_ring.fill_cnt));
        spin_unlock_bh(&htt->rx_ring.lock);

        if (ret)
                ath10k_htt_rx_ring_free(htt);

        return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
        tasklet_kill(&htt->txrx_compl_task);

        skb_queue_purge(&htt->rx_compl_q);
        skb_queue_purge(&htt->rx_in_ord_compl_q);
        skb_queue_purge(&htt->tx_fetch_ind_q);

        ath10k_htt_rx_ring_free(htt);

        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}

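/* Dequeue the buffer at the software read index and unmap it for CPU
 * access. The ring slot is cleared but not refilled here;
 * replenishment happens separately.
 */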
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int idx;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_ring.fill_cnt == 0) {
                ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
                return NULL;
        }

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;
        htt->rx_ring.paddrs_ring[idx] = 0;

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev,
                         ATH10K_SKB_RXCB(msdu)->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

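/* Pop the MSDUs making up one MPDU off the rx ring and queue them on
 * @amsdu. Oversized MSDUs spill over into plain chained buffers (which
 * carry no rx descriptor); popping stops once a descriptor has the
 * LAST_MSDU flag set.
 */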
/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   struct sk_buff_head *amsdu)
{
        struct ath10k *ar = htt->ar;
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        lockdep_assert_held(&htt->rx_ring.lock);

        for (;;) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                msdu = ath10k_htt_rx_netbuf_pop(htt);
                if (!msdu) {
                        __skb_queue_purge(amsdu);
                        return -ENOENT;
                }

                __skb_queue_tail(amsdu, msdu);

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report msdu payload since this is what caller
                 *        expects now
                 */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                                & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        __skb_queue_purge(amsdu);
                        return -EIO;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;

                /* Note: Chained buffers do not contain rx descriptor */
                while (msdu_chained--) {
                        msdu = ath10k_htt_rx_netbuf_pop(htt);
                        if (!msdu) {
                                __skb_queue_purge(amsdu);
                                return -ENOENT;
                        }

                        __skb_queue_tail(amsdu, msdu);
                        skb_trim(msdu, 0);
                        skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= msdu->len;
                        msdu_chaining = 1;
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;

                trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
                                         sizeof(*rx_desc) - sizeof(u32));

                if (last_msdu)
                        break;
        }

        if (skb_queue_empty(amsdu))
                msdu_chaining = -1;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}

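/* Pop a specific buffer by its DMA address. This is the full rx
 * reorder counterpart of ath10k_htt_rx_netbuf_pop(): in-order
 * indications name buffers by paddr rather than by ring order.
 */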
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
                                               u32 paddr)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
        if (!msdu)
                return NULL;

        rxcb = ATH10K_SKB_RXCB(msdu);
        hash_del(&rxcb->hlist);
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

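/* Collect the buffers named by an in-order indication's MSDU
 * descriptors. Offloaded frames carry no rx descriptor, so the
 * descriptor-based length fixup and MSDU_DONE check are skipped for
 * them.
 */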
static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
                                        struct htt_rx_in_ord_ind *ev,
                                        struct sk_buff_head *list)
{
        struct ath10k *ar = htt->ar;
        struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
        struct htt_rx_desc *rxd;
        struct sk_buff *msdu;
        int msdu_count;
        bool is_offload;
        u32 paddr;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu_count = __le16_to_cpu(ev->msdu_count);
        is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

        while (msdu_count--) {
                paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

                msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!msdu) {
                        __skb_queue_purge(list);
                        return -ENOENT;
                }

                __skb_queue_tail(list, msdu);

                if (!is_offload) {
                        rxd = (void *)msdu->data;

                        trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

                        skb_put(msdu, sizeof(*rxd));
                        skb_pull(msdu, sizeof(*rxd));
                        skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

                        if (!(__le32_to_cpu(rxd->attention.flags) &
                              RX_ATTENTION_FLAGS_MSDU_DONE)) {
                                ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
                                return -EIO;
                        }
                }

                msdu_desc++;
        }

        return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        dma_addr_t paddr;
        void *vaddr;
        size_t size;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_confused = false;

        /* XXX: The fill level could be changed during runtime in response to
         * the host processing latency. Is this really worth it?
         */
        htt->rx_ring.size = HTT_RX_RING_SIZE;
        htt->rx_ring.size_mask = htt->rx_ring.size - 1;
        htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn(ar, "htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.netbufs_ring =
                kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

        vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_ring;

        htt->rx_ring.paddrs_ring = vaddr;
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        hash_init(htt->rx_ring.skb_table);

        skb_queue_head_init(&htt->rx_compl_q);
        skb_queue_head_init(&htt->rx_in_ord_compl_q);
        skb_queue_head_init(&htt->tx_fetch_ind_q);
        atomic_set(&htt->num_mpdus_ready, 0);

        tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
                     (unsigned long)htt);

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);
err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
        return -ENOMEM;
}

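/* Per-cipher length of the IV/params that follow the 802.11 header in
 * a protected frame.
 */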
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
                                          enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
                                         enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
} __packed;

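/* VHT group ids 0 and 63 denote SU transmissions (addressed to an AP
 * and to a non-AP STA respectively); all other group ids are MU-MIMO.
 */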
#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  struct ieee80211_rx_status *status,
                                  struct htt_rx_desc *rxd)
{
        struct ieee80211_supported_band *sband;
        u8 cck, rate, bw, sgi, mcs, nss;
        u8 preamble = 0;
        u8 group_id;
        u32 info1, info2, info3;

        info1 = __le32_to_cpu(rxd->ppdu_start.info1);
        info2 = __le32_to_cpu(rxd->ppdu_start.info2);
        info3 = __le32_to_cpu(rxd->ppdu_start.info3);

        preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

        switch (preamble) {
        case HTT_RX_LEGACY:
                /* The band is required to derive the legacy rate index.
                 * Since the band can't be undefined, check that freq is
                 * non-zero.
                 */
                if (!status->freq)
                        return;

                cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
                rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
                rate &= ~RX_PPDU_START_RATE_FLAG;

                sband = &ar->mac.sbands[status->band];
                status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
                /* HT-SIG - Table 20-11 in info2 and info3 */
                mcs = info2 & 0x1F;
                nss = mcs >> 3;
                bw = (info2 >> 7) & 1;
                sgi = (info3 >> 7) & 1;

                status->rate_idx = mcs;
                status->flag |= RX_FLAG_HT;
                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;
                if (bw)
                        status->flag |= RX_FLAG_40MHZ;
                break;
        case HTT_RX_VHT:
        case HTT_RX_VHT_WITH_TXBF:
                /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
                 * TODO: check this
                 */
                bw = info2 & 3;
                sgi = info3 & 1;
                group_id = (info2 >> 4) & 0x3F;

                if (GROUP_ID_IS_SU_MIMO(group_id)) {
                        mcs = (info3 >> 4) & 0x0F;
                        nss = ((info2 >> 10) & 0x07) + 1;
                } else {
                        /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
                         * so it's impossible to decode MCS. Also since
                         * firmware consumes Group Id Management frames host
                         * has no knowledge regarding group/user position
                         * mapping so it's impossible to pick the correct Nsts
                         * from VHT-SIG-A1.
                         *
                         * Bandwidth and SGI are valid so report the rateinfo
                         * on best-effort basis.
                         */
                        mcs = 0;
                        nss = 1;
                }

                if (mcs > 0x09) {
                        ath10k_warn(ar, "invalid MCS received %u\n", mcs);
                        ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
                                    __le32_to_cpu(rxd->attention.flags),
                                    __le32_to_cpu(rxd->mpdu_start.info0),
                                    __le32_to_cpu(rxd->mpdu_start.info1),
                                    __le32_to_cpu(rxd->msdu_start.common.info0),
                                    __le32_to_cpu(rxd->msdu_start.common.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info0),
                                    __le32_to_cpu(rxd->ppdu_start.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info2),
                                    __le32_to_cpu(rxd->ppdu_start.info3),
                                    __le32_to_cpu(rxd->ppdu_start.info4));

                        ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
                                    __le32_to_cpu(rxd->msdu_end.common.info0),
                                    __le32_to_cpu(rxd->mpdu_end.info0));

                        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
                                        "rx desc msdu payload: ",
                                        rxd->msdu_payload, 50);
                }

                status->rate_idx = mcs;
                status->vht_nss = nss;

                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;

                switch (bw) {
                /* 20MHZ */
                case 0:
                        break;
                /* 40MHZ */
                case 1:
                        status->flag |= RX_FLAG_40MHZ;
                        break;
                /* 80MHZ */
                case 2:
                        status->vht_flag |= RX_VHT_FLAG_80MHZ;
                }

                status->flag |= RX_FLAG_VHT;
                break;
        default:
                break;
        }
}

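/* Try to resolve the rx channel via the transmitting peer: peer index
 * -> peer -> vdev -> channel context. Only meaningful for the first
 * MSDU of an MPDU with a valid peer index.
 */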
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;
        u16 peer_id;

        lockdep_assert_held(&ar->data_lock);

        if (!rxd)
                return NULL;

        if (rxd->attention.flags &
            __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
                return NULL;

        if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
                return NULL;

        peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_PEER_IDX);

        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer)
                return NULL;

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (WARN_ON_ONCE(!arvif))
                return NULL;

        if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
                return NULL;

        return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;

        lockdep_assert_held(&ar->data_lock);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (arvif->vdev_id == vdev_id &&
                    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
                        return def.chan;
        }

        return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
                              struct ieee80211_chanctx_conf *conf,
                              void *data)
{
        struct cfg80211_chan_def *def = data;

        *def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
        struct cfg80211_chan_def def = {};

        ieee80211_iter_chan_contexts_atomic(ar->hw,
                                            ath10k_htt_rx_h_any_chan_iter,
                                            &def);

        return def.chan;
}

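/* Pick the most credible rx channel, in decreasing order of
 * preference: scan channel, rx channel, peer's channel, vdev's
 * channel, any active channel context, target operating channel.
 */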
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd,
                                    u32 vdev_id)
{
        struct ieee80211_channel *ch;

        spin_lock_bh(&ar->data_lock);
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
        if (!ch)
                ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
        if (!ch)
                ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
        if (!ch)
                ch = ath10k_htt_rx_h_any_channel(ar);
        if (!ch)
                ch = ar->tgt_oper_chan;
        spin_unlock_bh(&ar->data_lock);

        if (!ch)
                return false;

        status->band = ch->band;
        status->freq = ch->center_freq;

        return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
                                   struct ieee80211_rx_status *status,
                                   struct htt_rx_desc *rxd)
{
        /* FIXME: Get real NF */
        status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
                         rxd->ppdu_start.rssi_comb;
        status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd)
{
        /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
         * means all prior MSDUs in a PPDU are reported to mac80211 without the
         * TSF. Is it worth holding frames until end of PPDU is known?
         *
         * FIXME: Can we get/compute 64bit TSF?
         */
        status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
        status->flag |= RX_FLAG_MACTIME_END;
}

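/* Fill in PPDU-level rx status. The per-PPDU fields (signal, channel,
 * rates) are reset and recomputed on the first MPDU of a PPDU; the TSF
 * is only known in the last MPDU.
 */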
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status,
                                 u32 vdev_id)
{
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
        bool is_first_ppdu;
        bool is_last_ppdu;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        is_first_ppdu = !!(rxd->attention.flags &
                           __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
        is_last_ppdu = !!(rxd->attention.flags &
                          __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

        if (is_first_ppdu) {
                /* New PPDU starts so clear out the old per-PPDU status. */
                status->freq = 0;
                status->rate_idx = 0;
                status->vht_nss = 0;
                status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
                status->flag &= ~(RX_FLAG_HT |
                                  RX_FLAG_VHT |
                                  RX_FLAG_SHORT_GI |
                                  RX_FLAG_40MHZ |
                                  RX_FLAG_MACTIME_END);
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;

                ath10k_htt_rx_h_signal(ar, status, rxd);
                ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
                ath10k_htt_rx_h_rates(ar, status, rxd);
        }

        if (is_last_ppdu)
                ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
        "BE",
        "BK",
        "BK",
        "BE",
        "VI",
        "VI",
        "VO",
        "VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
        u8 *qc;
        int tid;

        if (!ieee80211_is_data_qos(hdr->frame_control))
                return "";

        qc = ieee80211_get_qos_ctl(hdr);
        tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
        if (tid < 8)
                snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
        else
                snprintf(out, size, "tid %d", tid);

        return out;
}

static void ath10k_process_rx(struct ath10k *ar,
                              struct ieee80211_rx_status *rx_status,
                              struct sk_buff *skb)
{
        struct ieee80211_rx_status *status;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        char tid[32];

        status = IEEE80211_SKB_RXCB(skb);
        *status = *rx_status;

        ath10k_dbg(ar, ATH10K_DBG_DATA,
                   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
                   skb,
                   skb->len,
                   ieee80211_get_SA(hdr),
                   ath10k_get_tid(hdr, tid, sizeof(tid)),
                   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
                                                        "mcast" : "ucast",
                   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
                   status->flag == 0 ? "legacy" : "",
                   status->flag & RX_FLAG_HT ? "ht" : "",
                   status->flag & RX_FLAG_VHT ? "vht" : "",
                   status->flag & RX_FLAG_40MHZ ? "40" : "",
                   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
                   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->vht_nss,
                   status->freq,
                   status->band, status->flag,
                   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
                   !!(status->flag & RX_FLAG_MMIC_ERROR),
                   !!(status->flag & RX_FLAG_AMSDU_MORE));
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
                        skb->data, skb->len);
        trace_ath10k_rx_hdr(ar, skb->data, skb->len);
        trace_ath10k_rx_payload(ar, skb->data, skb->len);

        ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
                                      struct ieee80211_hdr *hdr)
{
        int len = ieee80211_hdrlen(hdr->frame_control);

        if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
                      ar->running_fw->fw_file.fw_features))
                len = round_up(len, 4);

        return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        enum htt_rx_mpdu_encrypt_type enctype,
                                        bool is_decrypted)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len;
        size_t crypto_len;
        bool is_first;
        bool is_last;

        rxd = (void *)msdu->data - sizeof(*rxd);
        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

        /* Delivered decapped frame:
         * [802.11 header]
         * [crypto param] <-- can be trimmed if !fcs_err &&
         *                    !decrypt_err && !peer_idx_invalid
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         * [payload]
         * [FCS] <-- at end, needs to be trimmed
         */

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!is_first)))
                return;

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
                return;

        skb_trim(msdu, msdu->len - FCS_LEN);

        /* In most cases this will be true for sniffed frames. It makes sense
         * to deliver them as-is without stripping the crypto param. This is
         * necessary for software based decryption.
         *
         * If there's no error then the frame is decrypted. At least that is
         * the case for frames that come in via fragmented rx indication.
         */
        if (!is_decrypted)
                return;

        /* The payload is decrypted so strip crypto params. Start from tail
         * since hdr is used to compute some stuff.
         */

        hdr = (void *)msdu->data;

        /* Tail */
        if (status->flag & RX_FLAG_IV_STRIPPED)
                skb_trim(msdu, msdu->len -
                         ath10k_htt_rx_crypto_tail_len(ar, enctype));

        /* MMIC */
        if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
            enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

        /* Head */
        if (status->flag & RX_FLAG_IV_STRIPPED) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                memmove((void *)msdu->data + crypto_len,
                        (void *)msdu->data, hdr_len);
                skb_pull(msdu, crypto_len);
        }
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          struct ieee80211_rx_status *status,
                                          const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        size_t hdr_len;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];

        /* Delivered decapped frame:
         * [nwifi 802.11 header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         *
         * Note: The nwifi header doesn't have QoS Control and is
         * (always?) a 3addr frame.
         *
         * Note2: There's no A-MSDU subframe header, even if the frame
         * is part of an A-MSDU.
         */

        /* pull decapped header and copy SA & DA */
        if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
            ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
                /* The QCA99X0 pads 2 bytes at the beginning of the MSDU
                 * in 4-address mode
                 */
                hdr = (struct ieee80211_hdr *)(msdu->data + 2);
                /* The skb length needs to be extended by 2 since the 2-byte
                 * pad at the head would otherwise leave the last 2 payload
                 * bytes beyond the recorded tail
                 */
                skb_put(msdu, 2);
        } else {
                hdr = (struct ieee80211_hdr *)(msdu->data);
        }

        hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
        ether_addr_copy(da, ieee80211_get_DA(hdr));
        ether_addr_copy(sa, ieee80211_get_SA(hdr));
        skb_pull(msdu, hdr_len);

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

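/* Locate the rfc1042/llc header inside the rx descriptor's
 * rx_hdr_status area: skip the 802.11 header and crypto params (both
 * padded to 4 bytes) on a first MSDU, plus the A-MSDU subframe header
 * if present.
 */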
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len, crypto_len;
        void *rfc1042;
        bool is_first, is_last, is_amsdu;

        rxd = (void *)msdu->data - sizeof(*rxd);
        hdr = (void *)rxd->rx_hdr_status;

        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
        is_amsdu = !(is_first && is_last);

        rfc1042 = hdr;

        if (is_first) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                rfc1042 += round_up(hdr_len, 4) +
                           round_up(crypto_len, 4);
        }

        if (is_amsdu)
                rfc1042 += sizeof(struct amsdu_subframe_hdr);

        return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        const u8 first_hdr[64],
                                        enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct ethhdr *eth;
        size_t hdr_len;
        void *rfc1042;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];

        /* Delivered decapped frame:
         * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
         * [payload]
         */

        rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
        if (WARN_ON_ONCE(!rfc1042))
                return;

        /* pull decapped header and copy SA & DA */
        eth = (struct ethhdr *)msdu->data;
        ether_addr_copy(da, eth->h_dest);
        ether_addr_copy(sa, eth->h_source);
        skb_pull(msdu, sizeof(struct ethhdr));

        /* push rfc1042/llc/snap */
        memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
               sizeof(struct rfc1042_hdr));

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
                                         struct sk_buff *msdu,
                                         struct ieee80211_rx_status *status,
                                         const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        size_t hdr_len;

        /* Delivered decapped frame:
         * [amsdu header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         * [payload]
         */

        skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
                                    struct sk_buff *msdu,
                                    struct ieee80211_rx_status *status,
                                    u8 first_hdr[64],
                                    enum htt_rx_mpdu_encrypt_type enctype,
                                    bool is_decrypted)
{
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;

        /* First msdu's decapped header:
         * [802.11 header] <-- padded to 4 bytes long
         * [crypto param] <-- padded to 4 bytes long
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         *
         * Other (2nd, 3rd, ..) msdu's decapped header:
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         */

        rxd = (void *)msdu->data - sizeof(*rxd);
        decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);

        switch (decap) {
        case RX_MSDU_DECAP_RAW:
                ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
                                            is_decrypted);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
                ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
                ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
                                            enctype);
                break;
        case RX_MSDU_DECAP_8023_SNAP_LLC:
                ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
                break;
        }
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags, info;
        bool is_ip4, is_ip6;
        bool is_tcp, is_udp;
        bool ip_csum_ok, tcpudp_csum_ok;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
        info = __le32_to_cpu(rxd->msdu_start.common.info1);

        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
        is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
        is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
        ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
        tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

        if (!is_ip4 && !is_ip6)
                return CHECKSUM_NONE;
        if (!is_tcp && !is_udp)
                return CHECKSUM_NONE;
        if (!ip_csum_ok)
                return CHECKSUM_NONE;
        if (!tcpudp_csum_ok)
                return CHECKSUM_NONE;

        return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
        msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

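/* MPDU-level fixups: decide whether hardware decrypted the frame, set
 * FCS/MIC error flags from the last MSDU's attention word, then
 * undecap each MSDU and mark its checksum state.
 */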
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status)
{
        struct sk_buff *first;
        struct sk_buff *last;
        struct sk_buff *msdu;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum htt_rx_mpdu_encrypt_type enctype;
        u8 first_hdr[64];
        u8 *qos;
        size_t hdr_len;
        bool has_fcs_err;
        bool has_crypto_err;
        bool has_tkip_err;
        bool has_peer_idx_invalid;
        bool is_decrypted;
        bool is_mgmt;
        u32 attention;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        is_mgmt = !!(rxd->attention.flags &
                     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

        /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
         * decapped header. It'll be used for undecapping of each MSDU.
         */
        hdr = (void *)rxd->rx_hdr_status;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(first_hdr, hdr, hdr_len);

        /* Each A-MSDU subframe will use the original header as the base and be
         * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
         */
        hdr = (void *)first_hdr;
        qos = ieee80211_get_qos_ctl(hdr);
        qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

        /* Some attention flags are valid only in the last MSDU. */
        last = skb_peek_tail(amsdu);
        rxd = (void *)last->data - sizeof(*rxd);
        attention = __le32_to_cpu(rxd->attention.flags);

        has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
        has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
        has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
        has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

        /* Note: If hardware captures an encrypted frame that it can't decrypt,
         * e.g. due to fcs error, missing peer or invalid key data it will
         * report the frame as raw.
         */
        is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
                        !has_fcs_err &&
                        !has_crypto_err &&
                        !has_peer_idx_invalid);

        /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
        status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
                          RX_FLAG_MMIC_ERROR |
                          RX_FLAG_DECRYPTED |
                          RX_FLAG_IV_STRIPPED |
                          RX_FLAG_ONLY_MONITOR |
                          RX_FLAG_MMIC_STRIPPED);

        if (has_fcs_err)
                status->flag |= RX_FLAG_FAILED_FCS_CRC;

        if (has_tkip_err)
                status->flag |= RX_FLAG_MMIC_ERROR;

        /* Firmware reports all necessary management frames via WMI already.
         * They are not reported to monitor interfaces at all so pass the ones
         * coming via HTT to monitor interfaces instead. This simplifies
         * matters a lot.
         */
        if (is_mgmt)
                status->flag |= RX_FLAG_ONLY_MONITOR;

        if (is_decrypted) {
                status->flag |= RX_FLAG_DECRYPTED;

                if (likely(!is_mgmt))
                        status->flag |= RX_FLAG_IV_STRIPPED |
                                        RX_FLAG_MMIC_STRIPPED;
        }

        skb_queue_walk(amsdu, msdu) {
                ath10k_htt_rx_h_csum_offload(msdu);
                ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
                                        is_decrypted);

                /* Undecapping involves copying the original 802.11 header back
                 * to sk_buff. If frame is protected and hardware has decrypted
                 * it then remove the protected bit.
                 */
                if (!is_decrypted)
                        continue;
                if (is_mgmt)
                        continue;

                hdr = (void *)msdu->data;
                hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
        }
}

1400static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1401                                    struct sk_buff_head *amsdu,
1402                                    struct ieee80211_rx_status *status)
1403{
1404        struct sk_buff *msdu;
1405
1406        while ((msdu = __skb_dequeue(amsdu))) {
1407                /* Setup per-MSDU flags */
1408                if (skb_queue_empty(amsdu))
1409                        status->flag &= ~RX_FLAG_AMSDU_MORE;
1410                else
1411                        status->flag |= RX_FLAG_AMSDU_MORE;
1412
1413                ath10k_process_rx(ar, status, msdu);
1414        }
1415}
1416
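    /* A single MSDU that is larger than one rx buffer arrives "chained"
     * across several buffers. Sketch of the coalescing math used below,
     * with made-up byte counts for illustration:
     *
     *   first skb:      len 1200, tailroom 400
     *   remaining skbs: len 1000 + 800      => total_len = 1800
     *   space needed:   1800 - 400 = 1400   => pskb_expand_head(first, 0,
     *                                          1400, GFP_ATOMIC)
     *
     * Every remaining fragment is then copied into the first skb's tail
     * and freed, leaving one linear skb on the queue.
     */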
1417static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
1418{
1419        struct sk_buff *skb, *first;
1420        int space;
1421        int total_len = 0;
1422
1423        /* TODO: It might be possible to optimize this by
1424         * using skb_try_coalesce or a similar method to
1425         * decrease copying, or maybe get mac80211 to
1426         * provide a way to just receive a list of
1427         * skbs?
1428         */
1429
1430        first = __skb_dequeue(amsdu);
1431
1432        /* Compute the total length so the head can be expanded once. */
1433        skb_queue_walk(amsdu, skb)
1434                total_len += skb->len;
1435
1436        space = total_len - skb_tailroom(first);
1437        if ((space > 0) &&
1438            (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
1439                /* TODO:  bump some rx-oom error stat */
1440                /* put it back together so we can free the
1441                 * whole list at once.
1442                 */
1443                __skb_queue_head(amsdu, first);
1444                return -1;
1445        }
1446
1447        /* Walk the list again, copying the contents of each
1448         * fragment into the first skb.
1449         */
1450        while ((skb = __skb_dequeue(amsdu))) {
1451                skb_copy_from_linear_data(skb, skb_put(first, skb->len),
1452                                          skb->len);
1453                dev_kfree_skb_any(skb);
1454        }
1455
1456        __skb_queue_head(amsdu, first);
1457        return 0;
1458}
1459
1460static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1461                                    struct sk_buff_head *amsdu,
1462                                    bool chained)
1463{
1464        struct sk_buff *first;
1465        struct htt_rx_desc *rxd;
1466        enum rx_msdu_decap_format decap;
1467
1468        first = skb_peek(amsdu);
1469        rxd = (void *)first->data - sizeof(*rxd);
1470        decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1471                   RX_MSDU_START_INFO1_DECAP_FORMAT);
1472
1473        if (!chained)
1474                return;
1475
1476        /* FIXME: The current unchaining logic can only handle the simple
1477         * case of raw msdu chaining. If the decap format is anything other
1478         * than raw, the chaining may be more complex and isn't handled here.
1479         * Don't even try re-constructing such frames - it'll be garbage.
1480         */
1481        if (decap != RX_MSDU_DECAP_RAW ||
1482            skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1483                __skb_queue_purge(amsdu);
1484                return;
1485        }
1486
1487        ath10k_unchain_msdu(amsdu);
1488}
1489
1490static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1491                                        struct sk_buff_head *amsdu,
1492                                        struct ieee80211_rx_status *rx_status)
1493{
1494        /* FIXME: It might be a good idea to do some fuzzy-testing to drop
1495         * invalid/dangerous frames.
1496         */
1497
1498        if (!rx_status->freq) {
1499                ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
1500                return false;
1501        }
1502
1503        if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1504                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
1505                return false;
1506        }
1507
1508        return true;
1509}
1510
1511static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1512                                   struct sk_buff_head *amsdu,
1513                                   struct ieee80211_rx_status *rx_status)
1514{
1515        if (skb_queue_empty(amsdu))
1516                return;
1517
1518        if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
1519                return;
1520
1521        __skb_queue_purge(amsdu);
1522}
1523
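    /* Per-A-MSDU rx pipeline as wired up below: pop the frames from the rx
     * ring under rx_ring.lock, derive PPDU-level status (channel, rate,
     * signal), undo any buffer chaining, drop disallowed frames, do the
     * per-MPDU crypto/undecap work and finally hand the MSDUs to mac80211
     * one by one.
     */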
1524static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1525{
1526        struct ath10k *ar = htt->ar;
1527        static struct ieee80211_rx_status rx_status;
1528        struct sk_buff_head amsdu;
1529        int ret;
1530
1531        __skb_queue_head_init(&amsdu);
1532
1533        spin_lock_bh(&htt->rx_ring.lock);
1534        if (htt->rx_confused) {
1535                spin_unlock_bh(&htt->rx_ring.lock);
1536                return -EIO;
1537        }
1538        ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
1539        spin_unlock_bh(&htt->rx_ring.lock);
1540
1541        if (ret < 0) {
1542                ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1543                __skb_queue_purge(&amsdu);
1544                /* FIXME: It's probably a good idea to reboot the
1545                 * device instead of leaving it inoperable.
1546                 */
1547                htt->rx_confused = true;
1548                return ret;
1549        }
1550
1551        ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
1552        ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
1553        ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
1554        ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
1555        ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
1556
1557        return 0;
1558}
1559
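    /* Rx indications are not processed in the interrupt path. The handlers
     * below only account for how many MPDUs became ready and kick the txrx
     * tasklet; the actual popping and delivery happens later in
     * ath10k_htt_txrx_compl_task().
     */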
1560static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
1561                                      struct htt_rx_indication *rx)
1562{
1563        struct ath10k *ar = htt->ar;
1564        struct htt_rx_indication_mpdu_range *mpdu_ranges;
1565        int num_mpdu_ranges;
1566        int i, mpdu_count = 0;
1567
1568        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1569                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1570        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1571
1572        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
1573                        rx, sizeof(*rx) +
1574                        (sizeof(struct htt_rx_indication_mpdu_range) *
1575                                num_mpdu_ranges));
1576
1577        for (i = 0; i < num_mpdu_ranges; i++)
1578                mpdu_count += mpdu_ranges[i].mpdu_count;
1579
1580        atomic_add(mpdu_count, &htt->num_mpdus_ready);
1581
1582        tasklet_schedule(&htt->txrx_compl_task);
1583}
1584
1585static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
1586{
1587        atomic_inc(&htt->num_mpdus_ready);
1588
1589        tasklet_schedule(&htt->txrx_compl_task);
1590}
1591
1592static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
1593                                       struct sk_buff *skb)
1594{
1595        struct ath10k_htt *htt = &ar->htt;
1596        struct htt_resp *resp = (struct htt_resp *)skb->data;
1597        struct htt_tx_done tx_done = {};
1598        int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1599        __le16 msdu_id;
1600        int i;
1601
1602        switch (status) {
1603        case HTT_DATA_TX_STATUS_NO_ACK:
1604                tx_done.status = HTT_TX_COMPL_STATE_NOACK;
1605                break;
1606        case HTT_DATA_TX_STATUS_OK:
1607                tx_done.status = HTT_TX_COMPL_STATE_ACK;
1608                break;
1609        case HTT_DATA_TX_STATUS_DISCARD:
1610        case HTT_DATA_TX_STATUS_POSTPONE:
1611        case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1612                tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1613                break;
1614        default:
1615                ath10k_warn(ar, "unhandled tx completion status %d\n", status);
1616                tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1617                break;
1618        }
1619
1620        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1621                   resp->data_tx_completion.num_msdus);
1622
1623        for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1624                msdu_id = resp->data_tx_completion.msdus[i];
1625                tx_done.msdu_id = __le16_to_cpu(msdu_id);
1626
1627                /* kfifo_put: In practice firmware shouldn't fire off a per-CE
1628                 * interrupt and the main (MSI/MSI-X) interrupt for the same
1629                 * HTC service, so it should be safe to use kfifo_put() w/o a lock.
1630                 *
1631                 * From the kfifo_put() documentation:
1632                 *  Note that with only one concurrent reader and one concurrent
1633                 *  writer, you don't need extra locking to use these macros.
1634                 */
1635                if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
1636                        ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
1637                                    tx_done.msdu_id, tx_done.status);
1638                        ath10k_txrx_tx_unref(htt, &tx_done);
1639                }
1640        }
1641}
1642
1643static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1644{
1645        struct htt_rx_addba *ev = &resp->rx_addba;
1646        struct ath10k_peer *peer;
1647        struct ath10k_vif *arvif;
1648        u16 info0, tid, peer_id;
1649
1650        info0 = __le16_to_cpu(ev->info0);
1651        tid = MS(info0, HTT_RX_BA_INFO0_TID);
1652        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
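            /* MS() is the driver's bitfield extractor. Assuming the usual
             * ath10k definition it expands to roughly:
             *
             *   (info0 & HTT_RX_BA_INFO0_TID_MASK) >> HTT_RX_BA_INFO0_TID_LSB
             */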
1653
1654        ath10k_dbg(ar, ATH10K_DBG_HTT,
1655                   "htt rx addba tid %hu peer_id %hu size %hhu\n",
1656                   tid, peer_id, ev->window_size);
1657
1658        spin_lock_bh(&ar->data_lock);
1659        peer = ath10k_peer_find_by_id(ar, peer_id);
1660        if (!peer) {
1661                ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1662                            peer_id);
1663                spin_unlock_bh(&ar->data_lock);
1664                return;
1665        }
1666
1667        arvif = ath10k_get_arvif(ar, peer->vdev_id);
1668        if (!arvif) {
1669                ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1670                            peer->vdev_id);
1671                spin_unlock_bh(&ar->data_lock);
1672                return;
1673        }
1674
1675        ath10k_dbg(ar, ATH10K_DBG_HTT,
1676                   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1677                   peer->addr, tid, ev->window_size);
1678
1679        ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1680        spin_unlock_bh(&ar->data_lock);
1681}
1682
1683static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1684{
1685        struct htt_rx_delba *ev = &resp->rx_delba;
1686        struct ath10k_peer *peer;
1687        struct ath10k_vif *arvif;
1688        u16 info0, tid, peer_id;
1689
1690        info0 = __le16_to_cpu(ev->info0);
1691        tid = MS(info0, HTT_RX_BA_INFO0_TID);
1692        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1693
1694        ath10k_dbg(ar, ATH10K_DBG_HTT,
1695                   "htt rx delba tid %hu peer_id %hu\n",
1696                   tid, peer_id);
1697
1698        spin_lock_bh(&ar->data_lock);
1699        peer = ath10k_peer_find_by_id(ar, peer_id);
1700        if (!peer) {
1701                ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
1702                            peer_id);
1703                spin_unlock_bh(&ar->data_lock);
1704                return;
1705        }
1706
1707        arvif = ath10k_get_arvif(ar, peer->vdev_id);
1708        if (!arvif) {
1709                ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
1710                            peer->vdev_id);
1711                spin_unlock_bh(&ar->data_lock);
1712                return;
1713        }
1714
1715        ath10k_dbg(ar, ATH10K_DBG_HTT,
1716                   "htt rx stop rx ba session sta %pM tid %hu\n",
1717                   peer->addr, tid);
1718
1719        ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1720        spin_unlock_bh(&ar->data_lock);
1721}
1722
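    /* Split one A-MSDU off the front of @list into @amsdu by dequeuing
     * until a descriptor with the LAST_MSDU bit is seen. For example a
     * list of [m0, m1(last), m2] yields [m0, m1] and leaves [m2] behind.
     * If the list runs out before a LAST_MSDU marker the partial A-MSDU
     * is spliced back and -EAGAIN is returned.
     */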
1723static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1724                                       struct sk_buff_head *amsdu)
1725{
1726        struct sk_buff *msdu;
1727        struct htt_rx_desc *rxd;
1728
1729        if (skb_queue_empty(list))
1730                return -ENOBUFS;
1731
1732        if (WARN_ON(!skb_queue_empty(amsdu)))
1733                return -EINVAL;
1734
1735        while ((msdu = __skb_dequeue(list))) {
1736                __skb_queue_tail(amsdu, msdu);
1737
1738                rxd = (void *)msdu->data - sizeof(*rxd);
1739                if (rxd->msdu_end.common.info0 &
1740                    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
1741                        break;
1742        }
1743
1744        msdu = skb_peek_tail(amsdu);
1745        rxd = (void *)msdu->data - sizeof(*rxd);
1746        if (!(rxd->msdu_end.common.info0 &
1747              __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
1748                skb_queue_splice_init(amsdu, list);
1749                return -EAGAIN;
1750        }
1751
1752        return 0;
1753}
1754
1755static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
1756                                            struct sk_buff *skb)
1757{
1758        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1759
1760        if (!ieee80211_has_protected(hdr->frame_control))
1761                return;
1762
1763        /* Offloaded frames are already decrypted but firmware insists they are
1764         * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
1765         * will drop the frame.
1766         */
1767
1768        hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1769        status->flag |= RX_FLAG_DECRYPTED |
1770                        RX_FLAG_IV_STRIPPED |
1771                        RX_FLAG_MMIC_STRIPPED;
1772}
1773
1774static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
1775                                       struct sk_buff_head *list)
1776{
1777        struct ath10k_htt *htt = &ar->htt;
1778        struct ieee80211_rx_status *status = &htt->rx_status;
1779        struct htt_rx_offload_msdu *rx;
1780        struct sk_buff *msdu;
1781        size_t offset;
1782
1783        while ((msdu = __skb_dequeue(list))) {
1784                /* Offloaded frames don't have an Rx descriptor. Instead they
1785                 * have a short meta information header.
1786                 */
1787
1788                rx = (void *)msdu->data;
1789
1790                skb_put(msdu, sizeof(*rx));
1791                skb_pull(msdu, sizeof(*rx));
1792
1793                if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
1794                        ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
1795                        dev_kfree_skb_any(msdu);
1796                        continue;
1797                }
1798
1799                skb_put(msdu, __le16_to_cpu(rx->msdu_len));
1800
1801                /* The offloaded rx header length isn't a multiple of 2 or 4
1802                 * so the actual payload is unaligned. Align the frame.
1803                 * Otherwise mac80211 complains. This shouldn't reduce
1804                 * performance much because these offloaded frames are rare.
1805                 */
1806                offset = 4 - ((unsigned long)msdu->data & 3);
1807                skb_put(msdu, offset);
1808                memmove(msdu->data + offset, msdu->data, msdu->len);
1809                skb_pull(msdu, offset);
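                    /* Worked example of the fixup above: with msdu->data
                     * ending in 0x..2, offset = 4 - 2 = 2 and the payload is
                     * shifted up by two bytes. An already aligned buffer gets
                     * offset = 4, i.e. a harmless 4-byte shift that keeps the
                     * payload 4-byte aligned either way.
                     */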
1810
1811                /* FIXME: The frame is NWifi. Re-construct QoS Control
1812                 * if possible later.
1813                 */
1814
1815                memset(status, 0, sizeof(*status));
1816                status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1817
1818                ath10k_htt_rx_h_rx_offload_prot(status, msdu);
1819                ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
1820                ath10k_process_rx(ar, status, msdu);
1821        }
1822}
1823
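    /* In-order indications don't reference rx ring indices; they carry the
     * DMA addresses of the completed buffers. The handler below pops those
     * buffers by paddr, peels off any offloaded frames and then processes
     * the rest one A-MSDU at a time.
     */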
1824static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1825{
1826        struct ath10k_htt *htt = &ar->htt;
1827        struct htt_resp *resp = (void *)skb->data;
1828        struct ieee80211_rx_status *status = &htt->rx_status;
1829        struct sk_buff_head list;
1830        struct sk_buff_head amsdu;
1831        u16 peer_id;
1832        u16 msdu_count;
1833        u8 vdev_id;
1834        u8 tid;
1835        bool offload;
1836        bool frag;
1837        int ret;
1838
1839        lockdep_assert_held(&htt->rx_ring.lock);
1840
1841        if (htt->rx_confused)
1842                return;
1843
1844        skb_pull(skb, sizeof(resp->hdr));
1845        skb_pull(skb, sizeof(resp->rx_in_ord_ind));
1846
1847        peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
1848        msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
1849        vdev_id = resp->rx_in_ord_ind.vdev_id;
1850        tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
1851        offload = !!(resp->rx_in_ord_ind.info &
1852                        HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
1853        frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
1854
1855        ath10k_dbg(ar, ATH10K_DBG_HTT,
1856                   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
1857                   vdev_id, peer_id, tid, offload, frag, msdu_count);
1858
1859        if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
1860                ath10k_warn(ar, "dropping invalid in order rx indication\n");
1861                return;
1862        }
1863
1864        /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
1865         * extracted and processed.
1866         */
1867        __skb_queue_head_init(&list);
1868        ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
1869        if (ret < 0) {
1870                ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
1871                htt->rx_confused = true;
1872                return;
1873        }
1874
1875        /* Offloaded frames are very different and need to be handled
1876         * separately.
1877         */
1878        if (offload)
1879                ath10k_htt_rx_h_rx_offload(ar, &list);
1880
1881        while (!skb_queue_empty(&list)) {
1882                __skb_queue_head_init(&amsdu);
1883                ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
1884                switch (ret) {
1885                case 0:
1886                        /* Note: The in-order indication may report interleaved
1887                         * frames from different PPDUs meaning reported rx rate
1888                         * to mac80211 isn't accurate/reliable. It's still
1889                         * better to report something than nothing though. This
1890                         * should still give an idea about rx rate to the user.
1891                         */
1892                        ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
1893                        ath10k_htt_rx_h_filter(ar, &amsdu, status);
1894                        ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
1895                        ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1896                        break;
1897                case -EAGAIN:
1898                        /* fall through */
1899                default:
1900                        /* Should not happen. */
1901                        ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
1902                        htt->rx_confused = true;
1903                        __skb_queue_purge(&list);
1904                        return;
1905                }
1906        }
1907}
1908
1909static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
1910                                                   const __le32 *resp_ids,
1911                                                   int num_resp_ids)
1912{
1913        int i;
1914        u32 resp_id;
1915
1916        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
1917                   num_resp_ids);
1918
1919        for (i = 0; i < num_resp_ids; i++) {
1920                resp_id = le32_to_cpu(resp_ids[i]);
1921
1922                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
1923                           resp_id);
1924
1925                /* TODO: free resp_id */
1926        }
1927}
1928
1929static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
1930{
1931        struct ieee80211_hw *hw = ar->hw;
1932        struct ieee80211_txq *txq;
1933        struct htt_resp *resp = (struct htt_resp *)skb->data;
1934        struct htt_tx_fetch_record *record;
1935        size_t len;
1936        size_t max_num_bytes;
1937        size_t max_num_msdus;
1938        size_t num_bytes;
1939        size_t num_msdus;
1940        const __le32 *resp_ids;
1941        u16 num_records;
1942        u16 num_resp_ids;
1943        u16 peer_id;
1944        u8 tid;
1945        int ret;
1946        int i;
1947
1948        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
1949
1950        len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
1951        if (unlikely(skb->len < len)) {
1952                ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
1953                return;
1954        }
1955
1956        num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
1957        num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
1958
1959        len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
1960        len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
1961
1962        if (unlikely(skb->len < len)) {
1963                ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
1964                return;
1965        }
1966
1967        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
1968                   num_records, num_resp_ids,
1969                   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
1970
1971        if (!ar->htt.tx_q_state.enabled) {
1972                ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
1973                return;
1974        }
1975
1976        if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
1977                ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
1978                return;
1979        }
1980
1981        rcu_read_lock();
1982
1983        for (i = 0; i < num_records; i++) {
1984                record = &resp->tx_fetch_ind.records[i];
1985                peer_id = MS(le16_to_cpu(record->info),
1986                             HTT_TX_FETCH_RECORD_INFO_PEER_ID);
1987                tid = MS(le16_to_cpu(record->info),
1988                         HTT_TX_FETCH_RECORD_INFO_TID);
1989                max_num_msdus = le16_to_cpu(record->num_msdus);
1990                max_num_bytes = le32_to_cpu(record->num_bytes);
1991
1992                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
1993                           i, peer_id, tid, max_num_msdus, max_num_bytes);
1994
1995                if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
1996                    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
1997                        ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
1998                                    peer_id, tid);
1999                        continue;
2000                }
2001
2002                spin_lock_bh(&ar->data_lock);
2003                txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2004                spin_unlock_bh(&ar->data_lock);
2005
2006                /* It is okay to release the lock and use txq because RCU read
2007                 * lock is held.
2008                 */
2009
2010                if (unlikely(!txq)) {
2011                        ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2012                                    peer_id, tid);
2013                        continue;
2014                }
2015
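                    /* Each record is effectively a pull quota: push frames
                     * from the txq until either limit is met, then rewrite
                     * the record in place with what was actually pushed (it
                     * is echoed back to firmware in the fetch response
                     * below). The check happens before each push, so
                     * num_bytes may overshoot max_num_bytes by one frame.
                     */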
2016                num_msdus = 0;
2017                num_bytes = 0;
2018
2019                while (num_msdus < max_num_msdus &&
2020                       num_bytes < max_num_bytes) {
2021                        ret = ath10k_mac_tx_push_txq(hw, txq);
2022                        if (ret < 0)
2023                                break;
2024
2025                        num_msdus++;
2026                        num_bytes += ret;
2027                }
2028
2029                record->num_msdus = cpu_to_le16(num_msdus);
2030                record->num_bytes = cpu_to_le32(num_bytes);
2031
2032                ath10k_htt_tx_txq_recalc(hw, txq);
2033        }
2034
2035        rcu_read_unlock();
2036
2037        resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2038        ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2039
2040        ret = ath10k_htt_tx_fetch_resp(ar,
2041                                       resp->tx_fetch_ind.token,
2042                                       resp->tx_fetch_ind.fetch_seq_num,
2043                                       resp->tx_fetch_ind.records,
2044                                       num_records);
2045        if (unlikely(ret)) {
2046                ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2047                            le32_to_cpu(resp->tx_fetch_ind.token), ret);
2048                /* FIXME: request fw restart */
2049        }
2050
2051        ath10k_htt_tx_txq_sync(ar);
2052}
2053
2054static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2055                                           struct sk_buff *skb)
2056{
2057        const struct htt_resp *resp = (void *)skb->data;
2058        size_t len;
2059        int num_resp_ids;
2060
2061        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2062
2063        len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2064        if (unlikely(skb->len < len)) {
2065                ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2066                return;
2067        }
2068
2069        num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2070        len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2071
2072        if (unlikely(skb->len < len)) {
2073                ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2074                return;
2075        }
2076
2077        ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2078                                               resp->tx_fetch_confirm.resp_ids,
2079                                               num_resp_ids);
2080}
2081
2082static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2083                                             struct sk_buff *skb)
2084{
2085        const struct htt_resp *resp = (void *)skb->data;
2086        const struct htt_tx_mode_switch_record *record;
2087        struct ieee80211_txq *txq;
2088        struct ath10k_txq *artxq;
2089        size_t len;
2090        size_t num_records;
2091        enum htt_tx_mode_switch_mode mode;
2092        bool enable;
2093        u16 info0;
2094        u16 info1;
2095        u16 threshold;
2096        u16 peer_id;
2097        u8 tid;
2098        int i;
2099
2100        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2101
2102        len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2103        if (unlikely(skb->len < len)) {
2104                ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2105                return;
2106        }
2107
2108        info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2109        info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2110
2111        enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
2112        num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2113        mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2114        threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2115
2116        ath10k_dbg(ar, ATH10K_DBG_HTT,
2117                   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2118                   info0, info1, enable, num_records, mode, threshold);
2119
2120        len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2121
2122        if (unlikely(skb->len < len)) {
2123                ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
2124                return;
2125        }
2126
2127        switch (mode) {
2128        case HTT_TX_MODE_SWITCH_PUSH:
2129        case HTT_TX_MODE_SWITCH_PUSH_PULL:
2130                break;
2131        default:
2132                ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
2133                            mode);
2134                return;
2135        }
2136
2137        if (!enable)
2138                return;
2139
2140        ar->htt.tx_q_state.enabled = enable;
2141        ar->htt.tx_q_state.mode = mode;
2142        ar->htt.tx_q_state.num_push_allowed = threshold;
2143
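            /* Hedged reading of the two modes: PUSH lets the host push tx
             * frames autonomously, while PUSH_PULL caps autonomous pushing
             * (see num_push_allowed above) and leaves the remainder to
             * explicit tx_fetch_ind pulls issued by the firmware.
             */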
2144        rcu_read_lock();
2145
2146        for (i = 0; i < num_records; i++) {
2147                record = &resp->tx_mode_switch_ind.records[i];
2148                info0 = le16_to_cpu(record->info0);
2149                peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2150                tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2151
2152                if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2153                    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2154                        ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2155                                    peer_id, tid);
2156                        continue;
2157                }
2158
2159                spin_lock_bh(&ar->data_lock);
2160                txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2161                spin_unlock_bh(&ar->data_lock);
2162
2163                /* It is okay to release the lock and use txq because RCU read
2164                 * lock is held.
2165                 */
2166
2167                if (unlikely(!txq)) {
2168                        ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2169                                    peer_id, tid);
2170                        continue;
2171                }
2172
2173                spin_lock_bh(&ar->htt.tx_lock);
2174                artxq = (void *)txq->drv_priv;
2175                artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2176                spin_unlock_bh(&ar->htt.tx_lock);
2177        }
2178
2179        rcu_read_unlock();
2180
2181        ath10k_mac_tx_push_pending(ar);
2182}
2183
2184static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
2185{
2186        enum nl80211_band band;
2187
2188        switch (phy_mode) {
2189        case MODE_11A:
2190        case MODE_11NA_HT20:
2191        case MODE_11NA_HT40:
2192        case MODE_11AC_VHT20:
2193        case MODE_11AC_VHT40:
2194        case MODE_11AC_VHT80:
2195                band = NL80211_BAND_5GHZ;
2196                break;
2197        case MODE_11G:
2198        case MODE_11B:
2199        case MODE_11GONLY:
2200        case MODE_11NG_HT20:
2201        case MODE_11NG_HT40:
2202        case MODE_11AC_VHT20_2G:
2203        case MODE_11AC_VHT40_2G:
2204        case MODE_11AC_VHT80_2G:
2205        default:
2206                band = NL80211_BAND_2GHZ;
2207        }
2208
2209        return band;
2210}
2211
2212void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2213{
2214        bool release;
2215
2216        release = ath10k_htt_t2h_msg_handler(ar, skb);
2217
2218        /* Free the indication buffer */
2219        if (release)
2220                dev_kfree_skb_any(skb);
2221}
2222
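    /* Returns true when the caller still owns @skb and may free it. False
     * is returned only for messages that are queued for deferred handling
     * (e.g. HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND below), in which case the
     * skb is consumed later by the txrx tasklet.
     */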
2223bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2224{
2225        struct ath10k_htt *htt = &ar->htt;
2226        struct htt_resp *resp = (struct htt_resp *)skb->data;
2227        enum htt_t2h_msg_type type;
2228
2229        /* confirm alignment */
2230        if (!IS_ALIGNED((unsigned long)skb->data, 4))
2231                ath10k_warn(ar, "unaligned htt message, expect trouble\n");
2232
2233        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
2234                   resp->hdr.msg_type);
2235
2236        if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
2237                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
2238                           resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2239                return true;
2240        }
2241        type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2242
2243        switch (type) {
2244        case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2245                htt->target_version_major = resp->ver_resp.major;
2246                htt->target_version_minor = resp->ver_resp.minor;
2247                complete(&htt->target_version_received);
2248                break;
2249        }
2250        case HTT_T2H_MSG_TYPE_RX_IND:
2251                ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
2252                break;
2253        case HTT_T2H_MSG_TYPE_PEER_MAP: {
2254                struct htt_peer_map_event ev = {
2255                        .vdev_id = resp->peer_map.vdev_id,
2256                        .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2257                };
2258                memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2259                ath10k_peer_map_event(htt, &ev);
2260                break;
2261        }
2262        case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2263                struct htt_peer_unmap_event ev = {
2264                        .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2265                };
2266                ath10k_peer_unmap_event(htt, &ev);
2267                break;
2268        }
2269        case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2270                struct htt_tx_done tx_done = {};
2271                int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2272
2273                tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2274
2275                switch (status) {
2276                case HTT_MGMT_TX_STATUS_OK:
2277                        tx_done.status = HTT_TX_COMPL_STATE_ACK;
2278                        break;
2279                case HTT_MGMT_TX_STATUS_RETRY:
2280                        tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2281                        break;
2282                case HTT_MGMT_TX_STATUS_DROP:
2283                        tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2284                        break;
2285                }
2286
2287                status = ath10k_txrx_tx_unref(htt, &tx_done);
2288                if (!status) {
2289                        spin_lock_bh(&htt->tx_lock);
2290                        ath10k_htt_tx_mgmt_dec_pending(htt);
2291                        spin_unlock_bh(&htt->tx_lock);
2292                }
2293                ath10k_mac_tx_push_pending(ar);
2294                break;
2295        }
2296        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
2297                ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
2298                tasklet_schedule(&htt->txrx_compl_task);
2299                break;
2300        case HTT_T2H_MSG_TYPE_SEC_IND: {
2301                struct ath10k *ar = htt->ar;
2302                struct htt_security_indication *ev = &resp->security_indication;
2303
2304                ath10k_dbg(ar, ATH10K_DBG_HTT,
2305                           "sec ind peer_id %d unicast %d type %d\n",
2306                          __le16_to_cpu(ev->peer_id),
2307                          !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2308                          MS(ev->flags, HTT_SECURITY_TYPE));
2309                complete(&ar->install_key_done);
2310                break;
2311        }
2312        case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
2313                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2314                                skb->data, skb->len);
2315                ath10k_htt_rx_frag_handler(htt);
2316                break;
2317        }
2318        case HTT_T2H_MSG_TYPE_TEST:
2319                break;
2320        case HTT_T2H_MSG_TYPE_STATS_CONF:
2321                trace_ath10k_htt_stats(ar, skb->data, skb->len);
2322                break;
2323        case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
2324                /* Firmware can return tx frames if it's unable to fully
2325                 * process them and suspects host may be able to fix it. ath10k
2326                 * sends all tx frames as already inspected so this shouldn't
2327                 * happen unless fw has a bug.
2328                 */
2329                ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
2330                break;
2331        case HTT_T2H_MSG_TYPE_RX_ADDBA:
2332                ath10k_htt_rx_addba(ar, resp);
2333                break;
2334        case HTT_T2H_MSG_TYPE_RX_DELBA:
2335                ath10k_htt_rx_delba(ar, resp);
2336                break;
2337        case HTT_T2H_MSG_TYPE_PKTLOG: {
2338                struct ath10k_pktlog_hdr *hdr =
2339                        (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
2340
2341                trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2342                                        sizeof(*hdr) +
2343                                        __le16_to_cpu(hdr->size));
2344                break;
2345        }
2346        case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2347                /* Ignore this event because mac80211 takes care of Rx
2348                 * aggregation reordering.
2349                 */
2350                break;
2351        }
2352        case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2353                skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2354                tasklet_schedule(&htt->txrx_compl_task);
2355                return false;
2356        }
2357        case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2358                break;
2359        case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2360                u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2361                u32 freq = __le32_to_cpu(resp->chan_change.freq);
2362
2363                ar->tgt_oper_chan =
2364                        __ieee80211_get_channel(ar->hw->wiphy, freq);
2365                ath10k_dbg(ar, ATH10K_DBG_HTT,
2366                           "htt chan change freq %u phymode %s\n",
2367                           freq, ath10k_wmi_phymode_str(phymode));
2368                break;
2369        }
2370        case HTT_T2H_MSG_TYPE_AGGR_CONF:
2371                break;
2372        case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
2373                struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
2374
2375                if (!tx_fetch_ind) {
2376                        ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
2377                        break;
2378                }
2379                skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
2380                tasklet_schedule(&htt->txrx_compl_task);
2381                break;
2382        }
2383        case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
2384                ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2385                break;
2386        case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
2387                ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
2388                break;
2389        case HTT_T2H_MSG_TYPE_EN_STATS:
2390        default:
2391                ath10k_warn(ar, "htt event (%d) not handled\n",
2392                            resp->hdr.msg_type);
2393                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2394                                skb->data, skb->len);
2395                break;
2396        };
2397        return true;
2398}
2399EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
2400
2401void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2402                                             struct sk_buff *skb)
2403{
2404        trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
2405        dev_kfree_skb_any(skb);
2406}
2407EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2408
2409static void ath10k_htt_txrx_compl_task(unsigned long ptr)
2410{
2411        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
2412        struct ath10k *ar = htt->ar;
2413        struct htt_tx_done tx_done = {};
2414        struct sk_buff_head rx_ind_q;
2415        struct sk_buff_head tx_ind_q;
2416        struct sk_buff *skb;
2417        unsigned long flags;
2418        int num_mpdus;
2419
2420        __skb_queue_head_init(&rx_ind_q);
2421        __skb_queue_head_init(&tx_ind_q);
2422
2423        spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
2424        skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
2425        spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
2426
2427        spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
2428        skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
2429        spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
2430
2431        /* kfifo_get: called only within this tasklet (txrx_compl_task) so
2432         * it's neatly serialized. From the kfifo_get() documentation:
2433         *  Note that with only one concurrent reader and one concurrent writer,
2434         *  you don't need extra locking to use these macros.
2435         */
2436        while (kfifo_get(&htt->txdone_fifo, &tx_done))
2437                ath10k_txrx_tx_unref(htt, &tx_done);
2438
2439        while ((skb = __skb_dequeue(&tx_ind_q))) {
2440                ath10k_htt_rx_tx_fetch_ind(ar, skb);
2441                dev_kfree_skb_any(skb);
2442        }
2443
2444        ath10k_mac_tx_push_pending(ar);
2445
2446        num_mpdus = atomic_read(&htt->num_mpdus_ready);
2447
2448        while (num_mpdus) {
2449                if (ath10k_htt_rx_handle_amsdu(htt))
2450                        break;
2451
2452                num_mpdus--;
2453                atomic_dec(&htt->num_mpdus_ready);
2454        }
2455
2456        while ((skb = __skb_dequeue(&rx_ind_q))) {
2457                spin_lock_bh(&htt->rx_ring.lock);
2458                ath10k_htt_rx_in_ord_ind(ar, skb);
2459                spin_unlock_bh(&htt->rx_ring.lock);
2460                dev_kfree_skb_any(skb);
2461        }
2462
2463        ath10k_htt_rx_msdu_buff_replenish(htt);
2464}
2465