linux/drivers/net/wireless/ath/ath10k/htt_rx.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
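/* The ring size must be a power of two so indices can wrap with a simple
 * mask: with e.g. HTT_RX_RING_SIZE_MAX = 2048 (an assumed value used only
 * for illustration), size_mask = 0x7ff and "idx = (idx + 1) & mask" wraps
 * 2047 back to 0. The fill level is kept just below half the ring; see the
 * BUILD_BUG_ON in __ath10k_htt_rx_ring_fill_n() for why the full rx reorder
 * firmware needs the ring to stay half empty.
 */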

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

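/* Look up the skb that was mapped at the given physical address. Used in
 * full rx reorder mode, where the firmware reports buffers by their DMA
 * address rather than by ring index; the skbs are kept in a hash table
 * keyed by paddr (see the hash_add() in the ring fill path below).
 */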
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
        struct ath10k_skb_rxcb *rxcb;

        hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
                if (rxcb->paddr == paddr)
                        return ATH10K_RXCB_SKB(rxcb);

        WARN_ON_ONCE(1);
        return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_rxcb *rxcb;
        struct hlist_node *n;
        int i;

        if (htt->rx_ring.in_ord_rx) {
                hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
                        skb = ATH10K_RXCB_SKB(rxcb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        hash_del(&rxcb->hlist);
                        dev_kfree_skb_any(skb);
                }
        } else {
                for (i = 0; i < htt->rx_ring.size; i++) {
                        skb = htt->rx_ring.netbufs_ring[i];
                        if (!skb)
                                continue;

                        rxcb = ATH10K_SKB_RXCB(skb);
                        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                }
        }

        htt->rx_ring.fill_cnt = 0;
        hash_init(htt->rx_ring.skb_table);
        memset(htt->rx_ring.netbufs_ring, 0,
               htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        /* The Full Rx Reorder firmware has no way of telling the host
         * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
         * To keep things simple make sure the ring is always half empty.
         * This guarantees there'll be no replenishment overruns possible.
         */
        BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

        idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                rxcb = ATH10K_SKB_RXCB(skb);
                rxcb->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
                htt->rx_ring.fill_cnt++;

                if (htt->rx_ring.in_ord_rx) {
                        hash_add(htt->rx_ring.skb_table,
                                 &ATH10K_SKB_RXCB(skb)->hlist,
                                 (u32)paddr);
                }

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        /*
         * Make sure the rx buffer is updated before the available buffer
         * index to avoid any potential rx ring corruption.
         */
        mb();
        *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
        return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}
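/* Note the usual kernel "__" convention here: ath10k_htt_rx_ring_fill_n()
 * asserts that rx_ring.lock is held and delegates to the double-underscore
 * variant, which callers may use directly only with the lock already taken.
 */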

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_deficit, num_to_fill;

        /* Refilling the whole RX ring buffer proves to be a bad idea. The
         * reason is RX may take up a significant amount of CPU cycles and
         * starve other tasks, e.g. TX on an ethernet device while acting as
         * a bridge with an ath10k wlan interface. This ended up with very
         * poor performance once the CPU of the host system was overwhelmed
         * with RX on ath10k.
         *
         * By limiting the number of refills the replenishing occurs
         * progressively. This in turn makes use of the fact that tasklets
         * are processed in FIFO order. This means actual RX processing can
         * starve out refilling. If there aren't enough buffers on the RX
         * ring the FW will not report RX until the ring is refilled with
         * enough buffers. This automatically balances load with respect to
         * CPU power.
         *
         * This probably comes at a cost of lower maximum throughput but
         * improves the average and stability.
         */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
        num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
                tasklet_schedule(&htt->rx_replenish_task);
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)arg;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
        struct ath10k_htt *htt = &ar->htt;
        int ret;

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
                                              htt->rx_ring.fill_cnt));
        spin_unlock_bh(&htt->rx_ring.lock);

        if (ret)
                ath10k_htt_rx_ring_free(htt);

        return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
        tasklet_kill(&htt->rx_replenish_task);
        tasklet_kill(&htt->txrx_compl_task);

        skb_queue_purge(&htt->tx_compl_q);
        skb_queue_purge(&htt->rx_compl_q);
        skb_queue_purge(&htt->rx_in_ord_compl_q);

        ath10k_htt_rx_ring_free(htt);

        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        int idx;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_ring.fill_cnt == 0) {
                ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
                return NULL;
        }

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;
        htt->rx_ring.paddrs_ring[idx] = 0;

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev,
                         ATH10K_SKB_RXCB(msdu)->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}
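/* fill_cnt counts buffers currently owned by the rx ring: the fill path
 * increments it per posted buffer, both pop paths decrement it, and the
 * replenish logic refills the difference up to fill_level.
 */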

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   u8 **fw_desc, int *fw_desc_len,
                                   struct sk_buff_head *amsdu)
{
        struct ath10k *ar = htt->ar;
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        lockdep_assert_held(&htt->rx_ring.lock);

        for (;;) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                msdu = ath10k_htt_rx_netbuf_pop(htt);
                if (!msdu) {
                        __skb_queue_purge(amsdu);
                        return -ENOENT;
                }

                __skb_queue_tail(amsdu, msdu);

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report msdu payload since this is what caller
                 *        expects now */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                                & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        __skb_queue_purge(amsdu);
                        return -EIO;
                }

                /*
                 * Copy the FW rx descriptor for this MSDU from the rx
                 * indication message into the MSDU's netbuf. HL uses the
                 * same rx indication message definition as LL, and simply
                 * appends new info (fields from the HW rx desc, and the
                 * MSDU payload itself). So, the offset into the rx
                 * indication message only has to account for the standard
                 * offset of the per-MSDU FW rx desc info within the
                 * message, and how many bytes of the per-MSDU FW rx desc
                 * info have already been consumed. (And the endianness of
                 * the host, since for a big-endian host, the rx ind
                 * message contents, including the per-MSDU rx desc bytes,
                 * were byteswapped during upload.)
                 */
                if (*fw_desc_len > 0) {
                        rx_desc->fw_desc.info0 = **fw_desc;
                        /*
                         * The target is expected to only provide the basic
                         * per-MSDU rx descriptors. Just to be sure, verify
                         * that the target has not attached extension data
                         * (e.g. LRO flow ID).
                         */

                        /* or more, if there's extension data */
                        (*fw_desc)++;
                        (*fw_desc_len)--;
                } else {
                        /*
                         * When an oversized A-MSDU happens, the FW will lose
                         * some of the MSDU status - in this case, the FW
                         * descriptors provided will be less than the
                         * actual MSDUs inside this MPDU. Mark the FW
                         * descriptors so that they will still be delivered
                         * to the upper stack, if there is no CRC error for
                         * this MPDU.
                         *
                         * FIX THIS - the FW descriptors are actually for
                         * MSDUs at the end of this A-MSDU instead of the
                         * beginning.
                         */
                        rx_desc->fw_desc.info0 = 0;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;

                /* Note: Chained buffers do not contain rx descriptor */
                while (msdu_chained--) {
                        msdu = ath10k_htt_rx_netbuf_pop(htt);
                        if (!msdu) {
                                __skb_queue_purge(amsdu);
                                return -ENOENT;
                        }

                        __skb_queue_tail(amsdu, msdu);
                        skb_trim(msdu, 0);
                        skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= msdu->len;
                        msdu_chaining = 1;
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;

                trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
                                         sizeof(*rx_desc) - sizeof(u32));

                if (last_msdu)
                        break;
        }

        if (skb_queue_empty(amsdu))
                msdu_chaining = -1;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
                                               u32 paddr)
{
        struct ath10k *ar = htt->ar;
        struct ath10k_skb_rxcb *rxcb;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
        if (!msdu)
                return NULL;

        rxcb = ATH10K_SKB_RXCB(msdu);
        hash_del(&rxcb->hlist);
        htt->rx_ring.fill_cnt--;

        dma_unmap_single(htt->ar->dev, rxcb->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
                        msdu->data, msdu->len + skb_tailroom(msdu));

        return msdu;
}

static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
                                        struct htt_rx_in_ord_ind *ev,
                                        struct sk_buff_head *list)
{
        struct ath10k *ar = htt->ar;
        struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
        struct htt_rx_desc *rxd;
        struct sk_buff *msdu;
        int msdu_count;
        bool is_offload;
        u32 paddr;

        lockdep_assert_held(&htt->rx_ring.lock);

        msdu_count = __le16_to_cpu(ev->msdu_count);
        is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

        while (msdu_count--) {
                paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

                msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
                if (!msdu) {
                        __skb_queue_purge(list);
                        return -ENOENT;
                }

                __skb_queue_tail(list, msdu);

                if (!is_offload) {
                        rxd = (void *)msdu->data;

                        trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

                        skb_put(msdu, sizeof(*rxd));
                        skb_pull(msdu, sizeof(*rxd));
                        skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

                        if (!(__le32_to_cpu(rxd->attention.flags) &
                              RX_ATTENTION_FLAGS_MSDU_DONE)) {
                                ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
                                return -EIO;
                        }
                }

                msdu_desc++;
        }

        return 0;
}
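/* Each in-order MSDU descriptor carries the buffer's DMA address and
 * length. For non-offloaded frames the buffer starts with the HW
 * htt_rx_desc, which is skipped with the skb_put()/skb_pull() pair above
 * before the payload length is applied.
 */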

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        dma_addr_t paddr;
        void *vaddr;
        size_t size;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_confused = false;

        /* XXX: The fill level could be changed during runtime in response to
         * the host processing latency. Is this really worth it?
         */
        htt->rx_ring.size = HTT_RX_RING_SIZE;
        htt->rx_ring.size_mask = htt->rx_ring.size - 1;
        htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn(ar, "htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.netbufs_ring =
                kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

        vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_ring;

        htt->rx_ring.paddrs_ring = vaddr;
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_KERNEL);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        hash_init(htt->rx_ring.skb_table);

        tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
                     (unsigned long)htt);

        skb_queue_head_init(&htt->tx_compl_q);
        skb_queue_head_init(&htt->rx_compl_q);
        skb_queue_head_init(&htt->rx_in_ord_compl_q);

        tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
                     (unsigned long)htt);

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);
err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
        return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
                                          enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_IV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_HDR_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
                                         enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return IEEE80211_WEP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return IEEE80211_TKIP_ICV_LEN;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return IEEE80211_CCMP_MIC_LEN;
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                break;
        }

        ath10k_warn(ar, "unsupported encryption type %d\n", type);
        return 0;
}

struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
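/* In VHT (802.11ac) the Group ID in VHT-SIG-A distinguishes SU from
 * MU-MIMO PPDUs: values 0 and 63 denote single-user transmissions,
 * everything else is multi-user.
 */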

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  struct ieee80211_rx_status *status,
                                  struct htt_rx_desc *rxd)
{
        struct ieee80211_supported_band *sband;
        u8 cck, rate, bw, sgi, mcs, nss;
        u8 preamble = 0;
        u8 group_id;
        u32 info1, info2, info3;

        info1 = __le32_to_cpu(rxd->ppdu_start.info1);
        info2 = __le32_to_cpu(rxd->ppdu_start.info2);
        info3 = __le32_to_cpu(rxd->ppdu_start.info3);

        preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

        switch (preamble) {
        case HTT_RX_LEGACY:
                /* The band is required to get the legacy rate index. Since
                 * the band can't be undefined, check if freq is non-zero.
                 */
                if (!status->freq)
                        return;

                cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
                rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
                rate &= ~RX_PPDU_START_RATE_FLAG;

                sband = &ar->mac.sbands[status->band];
                status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
                /* HT-SIG - Table 20-11 in info2 and info3 */
                mcs = info2 & 0x1F;
                nss = mcs >> 3;
                bw = (info2 >> 7) & 1;
                sgi = (info3 >> 7) & 1;

                status->rate_idx = mcs;
                status->flag |= RX_FLAG_HT;
                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;
                if (bw)
                        status->flag |= RX_FLAG_40MHZ;
                break;
        case HTT_RX_VHT:
        case HTT_RX_VHT_WITH_TXBF:
                /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
                 * TODO: check this
                 */
                bw = info2 & 3;
                sgi = info3 & 1;
                group_id = (info2 >> 4) & 0x3F;

                if (GROUP_ID_IS_SU_MIMO(group_id)) {
                        mcs = (info3 >> 4) & 0x0F;
                        nss = ((info2 >> 10) & 0x07) + 1;
                } else {
                        /* Hardware doesn't decode VHT-SIG-B into Rx descriptor
                         * so it's impossible to decode MCS. Also since
                         * firmware consumes Group Id Management frames host
                         * has no knowledge regarding group/user position
                         * mapping so it's impossible to pick the correct Nsts
                         * from VHT-SIG-A1.
                         *
                         * Bandwidth and SGI are valid so report the rateinfo
                         * on best-effort basis.
                         */
                        mcs = 0;
                        nss = 1;
                }

                if (mcs > 0x09) {
                        ath10k_warn(ar, "invalid MCS received %u\n", mcs);
                        ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
                                    __le32_to_cpu(rxd->attention.flags),
                                    __le32_to_cpu(rxd->mpdu_start.info0),
                                    __le32_to_cpu(rxd->mpdu_start.info1),
                                    __le32_to_cpu(rxd->msdu_start.common.info0),
                                    __le32_to_cpu(rxd->msdu_start.common.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info0),
                                    __le32_to_cpu(rxd->ppdu_start.info1),
                                    __le32_to_cpu(rxd->ppdu_start.info2),
                                    __le32_to_cpu(rxd->ppdu_start.info3),
                                    __le32_to_cpu(rxd->ppdu_start.info4));

                        ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
                                    __le32_to_cpu(rxd->msdu_end.common.info0),
                                    __le32_to_cpu(rxd->mpdu_end.info0));

                        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
                                        "rx desc msdu payload: ",
                                        rxd->msdu_payload, 50);
                }

                status->rate_idx = mcs;
                status->vht_nss = nss;

                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;

                switch (bw) {
                /* 20MHZ */
                case 0:
                        break;
                /* 40MHZ */
                case 1:
                        status->flag |= RX_FLAG_40MHZ;
                        break;
                /* 80MHZ */
                case 2:
                        status->vht_flag |= RX_VHT_FLAG_80MHZ;
                }

                status->flag |= RX_FLAG_VHT;
                break;
        default:
                break;
        }
}
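/* Note: mac80211 interprets rate_idx differently per preamble: for legacy
 * frames it indexes the band's bitrate table, for HT it is the MCS index,
 * and for VHT it is the per-stream MCS (0..9) combined with vht_nss.
 */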

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;
        u16 peer_id;

        lockdep_assert_held(&ar->data_lock);

        if (!rxd)
                return NULL;

        if (rxd->attention.flags &
            __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
                return NULL;

        if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
                return NULL;

        peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_PEER_IDX);

        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer)
                return NULL;

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (WARN_ON_ONCE(!arvif))
                return NULL;

        if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
                return NULL;

        return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
        struct ath10k_vif *arvif;
        struct cfg80211_chan_def def;

        lockdep_assert_held(&ar->data_lock);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (arvif->vdev_id == vdev_id &&
                    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
                        return def.chan;
        }

        return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
                              struct ieee80211_chanctx_conf *conf,
                              void *data)
{
        struct cfg80211_chan_def *def = data;

        *def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
        struct cfg80211_chan_def def = {};

        ieee80211_iter_chan_contexts_atomic(ar->hw,
                                            ath10k_htt_rx_h_any_chan_iter,
                                            &def);

        return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd,
                                    u32 vdev_id)
{
        struct ieee80211_channel *ch;

        spin_lock_bh(&ar->data_lock);
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
        if (!ch)
                ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
        if (!ch)
                ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
        if (!ch)
                ch = ath10k_htt_rx_h_any_channel(ar);
        spin_unlock_bh(&ar->data_lock);

        if (!ch)
                return false;

        status->band = ch->band;
        status->freq = ch->center_freq;

        return true;
}
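/* Channel resolution above is a best-effort fallback chain: the scan
 * channel first, then the current rx channel, then whatever can be derived
 * from the rx descriptor's peer, then the vdev, and finally any active
 * channel context.
 */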

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
                                   struct ieee80211_rx_status *status,
                                   struct htt_rx_desc *rxd)
{
        /* FIXME: Get real NF */
        status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
                         rxd->ppdu_start.rssi_comb;
        status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
                                    struct ieee80211_rx_status *status,
                                    struct htt_rx_desc *rxd)
{
        /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
         * means all prior MSDUs in a PPDU are reported to mac80211 without the
         * TSF. Is it worth holding frames until end of PPDU is known?
         *
         * FIXME: Can we get/compute 64bit TSF?
         */
        status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
        status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
                                 struct ieee80211_rx_status *status,
                                 u32 vdev_id)
{
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
        bool is_first_ppdu;
        bool is_last_ppdu;

        if (skb_queue_empty(amsdu))
                return;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);

        is_first_ppdu = !!(rxd->attention.flags &
                           __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
        is_last_ppdu = !!(rxd->attention.flags &
                          __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

        if (is_first_ppdu) {
                /* New PPDU starts so clear out the old per-PPDU status. */
                status->freq = 0;
                status->rate_idx = 0;
                status->vht_nss = 0;
                status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
                status->flag &= ~(RX_FLAG_HT |
                                  RX_FLAG_VHT |
                                  RX_FLAG_SHORT_GI |
                                  RX_FLAG_40MHZ |
                                  RX_FLAG_MACTIME_END);
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;

                ath10k_htt_rx_h_signal(ar, status, rxd);
                ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
                ath10k_htt_rx_h_rates(ar, status, rxd);
        }

        if (is_last_ppdu)
                ath10k_htt_rx_h_mactime(ar, status, rxd);
}
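/* Per-PPDU status (frequency, rates, signal) is valid only in the first
 * MPDU's rx descriptor and the TSF only in the last one, hence the
 * first/last checks above; MPDUs in between inherit the status accumulated
 * so far.
 */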

static const char * const tid_to_ac[] = {
        "BE",
        "BK",
        "BK",
        "BE",
        "VI",
        "VI",
        "VO",
        "VO",
};
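/* The table above is the 802.1D user priority to access category mapping
 * used by 802.11: TIDs 0 and 3 map to Best Effort, 1 and 2 to Background,
 * 4 and 5 to Video and 6 and 7 to Voice.
 */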

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
        u8 *qc;
        int tid;

        if (!ieee80211_is_data_qos(hdr->frame_control))
                return "";

        qc = ieee80211_get_qos_ctl(hdr);
        tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
        if (tid < 8)
                snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
        else
                snprintf(out, size, "tid %d", tid);

        return out;
}

static void ath10k_process_rx(struct ath10k *ar,
                              struct ieee80211_rx_status *rx_status,
                              struct sk_buff *skb)
{
        struct ieee80211_rx_status *status;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        char tid[32];

        status = IEEE80211_SKB_RXCB(skb);
        *status = *rx_status;

        ath10k_dbg(ar, ATH10K_DBG_DATA,
                   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
                   skb,
                   skb->len,
                   ieee80211_get_SA(hdr),
                   ath10k_get_tid(hdr, tid, sizeof(tid)),
                   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
                                                        "mcast" : "ucast",
                   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
                   status->flag == 0 ? "legacy" : "",
                   status->flag & RX_FLAG_HT ? "ht" : "",
                   status->flag & RX_FLAG_VHT ? "vht" : "",
                   status->flag & RX_FLAG_40MHZ ? "40" : "",
                   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
                   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->vht_nss,
                   status->freq,
                   status->band, status->flag,
                   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
                   !!(status->flag & RX_FLAG_MMIC_ERROR),
                   !!(status->flag & RX_FLAG_AMSDU_MORE));
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
                        skb->data, skb->len);
        trace_ath10k_rx_hdr(ar, skb->data, skb->len);
        trace_ath10k_rx_payload(ar, skb->data, skb->len);

        ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
                                      struct ieee80211_hdr *hdr)
{
        int len = ieee80211_hdrlen(hdr->frame_control);

        if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
                      ar->fw_features))
                len = round_up(len, 4);

        return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        enum htt_rx_mpdu_encrypt_type enctype,
                                        bool is_decrypted)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len;
        size_t crypto_len;
        bool is_first;
        bool is_last;

        rxd = (void *)msdu->data - sizeof(*rxd);
        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

        /* Delivered decapped frame:
         * [802.11 header]
         * [crypto param] <-- can be trimmed if !fcs_err &&
         *                    !decrypt_err && !peer_idx_invalid
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         * [payload]
         * [FCS] <-- at end, needs to be trimmed
         */
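        /* For example, with CCMP (AES_CCM_WPA2) the crypto param is the
         * 8 byte PN/IV header (IEEE80211_CCMP_HDR_LEN) and the tail is the
         * 8 byte MIC (IEEE80211_CCMP_MIC_LEN); see
         * ath10k_htt_rx_crypto_param_len() and
         * ath10k_htt_rx_crypto_tail_len() above.
         */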

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!is_first)))
                return;

        /* This probably shouldn't happen but warn just in case */
        if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
                return;

        skb_trim(msdu, msdu->len - FCS_LEN);

        /* In most cases this will be true for sniffed frames. It makes sense
         * to deliver them as-is without stripping the crypto param. This is
         * necessary for software based decryption.
         *
         * If there's no error then the frame is decrypted. At least that is
         * the case for frames that come in via fragmented rx indication.
         */
        if (!is_decrypted)
                return;

        /* The payload is decrypted so strip crypto params. Start from tail
         * since hdr is used to compute some stuff.
         */

        hdr = (void *)msdu->data;

        /* Tail */
        skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));

        /* MMIC */
        if (!ieee80211_has_morefrags(hdr->frame_control) &&
            enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

        /* Head */
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

        memmove((void *)msdu->data + crypto_len,
                (void *)msdu->data, hdr_len);
        skb_pull(msdu, crypto_len);
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          struct ieee80211_rx_status *status,
                                          const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        size_t hdr_len;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];

        /* Delivered decapped frame:
         * [nwifi 802.11 header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         *
         * Note: The nwifi header doesn't have QoS Control and is
         * (always?) a 3addr frame.
         *
         * Note2: There's no A-MSDU subframe header. Even if it's part
         * of an A-MSDU.
         */

        /* pull decapped header and copy SA & DA */
        if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
            ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
                /* The QCA99X0 pads 2 bytes at the beginning of the MSDU
                 * in 4 address mode.
                 */
                hdr = (struct ieee80211_hdr *)(msdu->data + 2);
                /* The skb length needs to be extended by 2 as the 2 bytes
                 * at the tail are otherwise excluded due to the padding.
                 */
                skb_put(msdu, 2);
        } else {
                hdr = (struct ieee80211_hdr *)(msdu->data);
        }

        hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
        ether_addr_copy(da, ieee80211_get_DA(hdr));
        ether_addr_copy(sa, ieee80211_get_SA(hdr));
        skb_pull(msdu, hdr_len);

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
                                          struct sk_buff *msdu,
                                          enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct htt_rx_desc *rxd;
        size_t hdr_len, crypto_len;
        void *rfc1042;
        bool is_first, is_last, is_amsdu;

        rxd = (void *)msdu->data - sizeof(*rxd);
        hdr = (void *)rxd->rx_hdr_status;

        is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
        is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
        is_amsdu = !(is_first && is_last);

        rfc1042 = hdr;

        if (is_first) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

                rfc1042 += round_up(hdr_len, 4) +
                           round_up(crypto_len, 4);
        }

        if (is_amsdu)
                rfc1042 += sizeof(struct amsdu_subframe_hdr);

        return rfc1042;
}
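/* The rfc1042/llc header is located inside the raw 802.11 header dump
 * (rx_hdr_status): for the first MSDU it sits past the 4-byte aligned
 * 802.11 header and crypto param, and for A-MSDU subframes it is
 * additionally preceded by the subframe header.
 */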

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
                                        struct sk_buff *msdu,
                                        struct ieee80211_rx_status *status,
                                        const u8 first_hdr[64],
                                        enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr;
        struct ethhdr *eth;
        size_t hdr_len;
        void *rfc1042;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];

        /* Delivered decapped frame:
         * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
         * [payload]
         */

        rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
        if (WARN_ON_ONCE(!rfc1042))
                return;

        /* pull decapped header and copy SA & DA */
        eth = (struct ethhdr *)msdu->data;
        ether_addr_copy(da, eth->h_dest);
        ether_addr_copy(sa, eth->h_source);
        skb_pull(msdu, sizeof(struct ethhdr));

        /* push rfc1042/llc/snap */
        memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
               sizeof(struct rfc1042_hdr));

        /* push original 802.11 header */
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

        /* original 802.11 header has a different DA and in
         * case of 4addr it may also have different SA
         */
        hdr = (struct ieee80211_hdr *)msdu->data;
        ether_addr_copy(ieee80211_get_DA(hdr), da);
        ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
                                         struct sk_buff *msdu,
                                         struct ieee80211_rx_status *status,
                                         const u8 first_hdr[64])
{
        struct ieee80211_hdr *hdr;
        size_t hdr_len;

        /* Delivered decapped frame:
         * [amsdu header] <-- replaced with 802.11 hdr
         * [rfc1042/llc]
         * [payload]
         */

        skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
                                    struct sk_buff *msdu,
                                    struct ieee80211_rx_status *status,
                                    u8 first_hdr[64],
                                    enum htt_rx_mpdu_encrypt_type enctype,
                                    bool is_decrypted)
{
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;

        /* First msdu's decapped header:
         * [802.11 header] <-- padded to 4 bytes long
         * [crypto param] <-- padded to 4 bytes long
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         *
         * Other (2nd, 3rd, ..) msdu's decapped header:
         * [amsdu header] <-- only if A-MSDU
         * [rfc1042/llc]
         */

        rxd = (void *)msdu->data - sizeof(*rxd);
        decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);

        switch (decap) {
        case RX_MSDU_DECAP_RAW:
                ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
                                            is_decrypted);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
                ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
                ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
                                            enctype);
                break;
        case RX_MSDU_DECAP_8023_SNAP_LLC:
                ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
                break;
        }
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags, info;
        bool is_ip4, is_ip6;
        bool is_tcp, is_udp;
        bool ip_csum_ok, tcpudp_csum_ok;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
        info = __le32_to_cpu(rxd->msdu_start.common.info1);

        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
        is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
        is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
        ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
        tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

        if (!is_ip4 && !is_ip6)
                return CHECKSUM_NONE;
        if (!is_tcp && !is_udp)
                return CHECKSUM_NONE;
        if (!ip_csum_ok)
                return CHECKSUM_NONE;
        if (!tcpudp_csum_ok)
                return CHECKSUM_NONE;

        return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
        msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
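/* The HW validates IP and TCP/UDP checksums and reports failures via
 * attention flags. CHECKSUM_UNNECESSARY is claimed only when the frame is
 * recognized as IPv4/IPv6 TCP or UDP and both checks passed; anything else
 * falls back to CHECKSUM_NONE so the network stack re-verifies.
 */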
1327
1328static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1329                                 struct sk_buff_head *amsdu,
1330                                 struct ieee80211_rx_status *status)
1331{
1332        struct sk_buff *first;
1333        struct sk_buff *last;
1334        struct sk_buff *msdu;
1335        struct htt_rx_desc *rxd;
1336        struct ieee80211_hdr *hdr;
1337        enum htt_rx_mpdu_encrypt_type enctype;
1338        u8 first_hdr[64];
1339        u8 *qos;
1340        size_t hdr_len;
1341        bool has_fcs_err;
1342        bool has_crypto_err;
1343        bool has_tkip_err;
1344        bool has_peer_idx_invalid;
1345        bool is_decrypted;
1346        u32 attention;
1347
1348        if (skb_queue_empty(amsdu))
1349                return;
1350
1351        first = skb_peek(amsdu);
1352        rxd = (void *)first->data - sizeof(*rxd);
1353
1354        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1355                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1356
1357        /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
1358         * decapped header. It'll be used for undecapping of each MSDU.
1359         */
1360        hdr = (void *)rxd->rx_hdr_status;
1361        hdr_len = ieee80211_hdrlen(hdr->frame_control);
1362        memcpy(first_hdr, hdr, hdr_len);
1363
1364        /* Each A-MSDU subframe will use the original header as the base and be
1365         * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1366         */
1367        hdr = (void *)first_hdr;
1368        qos = ieee80211_get_qos_ctl(hdr);
1369        qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1370
1371        /* Some attention flags are valid only in the last MSDU. */
1372        last = skb_peek_tail(amsdu);
1373        rxd = (void *)last->data - sizeof(*rxd);
1374        attention = __le32_to_cpu(rxd->attention.flags);
1375
1376        has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
1377        has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
1378        has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
1379        has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
1380
1381        /* Note: If hardware captures an encrypted frame that it can't decrypt,
1382         * e.g. due to fcs error, missing peer or invalid key data it will
1383         * report the frame as raw.
1384         */
1385        is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
1386                        !has_fcs_err &&
1387                        !has_crypto_err &&
1388                        !has_peer_idx_invalid);
1389
1390        /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1391        status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
1392                          RX_FLAG_MMIC_ERROR |
1393                          RX_FLAG_DECRYPTED |
1394                          RX_FLAG_IV_STRIPPED |
1395                          RX_FLAG_MMIC_STRIPPED);
1396
1397        if (has_fcs_err)
1398                status->flag |= RX_FLAG_FAILED_FCS_CRC;
1399
1400        if (has_tkip_err)
1401                status->flag |= RX_FLAG_MMIC_ERROR;
1402
1403        if (is_decrypted)
1404                status->flag |= RX_FLAG_DECRYPTED |
1405                                RX_FLAG_IV_STRIPPED |
1406                                RX_FLAG_MMIC_STRIPPED;
1407
1408        skb_queue_walk(amsdu, msdu) {
1409                ath10k_htt_rx_h_csum_offload(msdu);
1410                ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
1411                                        is_decrypted);
1412
1413                /* Undecapping involves copying the original 802.11 header
1414                 * back into the sk_buff. If the frame is protected and
1415                 * hardware has decrypted it, remove the Protected bit.
1416                 */
1417                if (!is_decrypted)
1418                        continue;
1419
1420                hdr = (void *)msdu->data;
1421                hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1422        }
1423}
1424
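    /* Hand each MSDU of the A-MSDU to mac80211. All subframes except the
     * last carry RX_FLAG_AMSDU_MORE so mac80211 can tell they belong to
     * the same A-MSDU.
     */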
1425static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1426                                    struct sk_buff_head *amsdu,
1427                                    struct ieee80211_rx_status *status)
1428{
1429        struct sk_buff *msdu;
1430
1431        while ((msdu = __skb_dequeue(amsdu))) {
1432                /* Set up per-MSDU flags */
1433                if (skb_queue_empty(amsdu))
1434                        status->flag &= ~RX_FLAG_AMSDU_MORE;
1435                else
1436                        status->flag |= RX_FLAG_AMSDU_MORE;
1437
1438                ath10k_process_rx(ar, status, msdu);
1439        }
1440}
1441
1442static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
1443{
1444        struct sk_buff *skb, *first;
1445        int space;
1446        int total_len = 0;
1447
1448        /* TODO: This could be optimized by using
1449         * skb_try_coalesce or a similar method to
1450         * reduce copying, or perhaps by getting
1451         * mac80211 to provide a way to receive a
1452         * list of skbs directly.
1453         */
1454
1455        first = __skb_dequeue(amsdu);
1456
1457        /* Sum the total length so it can be allocated all at once. */
1458        skb_queue_walk(amsdu, skb)
1459                total_len += skb->len;
1460
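            /* Grow the first skb's tailroom so the remaining fragments can
             * be copied in linearly; this may fail under memory pressure.
             */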
1461        space = total_len - skb_tailroom(first);
1462        if ((space > 0) &&
1463            (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
1464                /* TODO:  bump some rx-oom error stat */
1465                /* put it back together so we can free the
1466                 * whole list at once.
1467                 */
1468                __skb_queue_head(amsdu, first);
1469                return -1;
1470        }
1471
1472        /* Walk the list again, copying the contents
1473         * into the first skb.
1474         */
1475        while ((skb = __skb_dequeue(amsdu))) {
1476                skb_copy_from_linear_data(skb, skb_put(first, skb->len),
1477                                          skb->len);
1478                dev_kfree_skb_any(skb);
1479        }
1480
1481        __skb_queue_head(amsdu, first);
1482        return 0;
1483}
1484
1485static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1486                                    struct sk_buff_head *amsdu,
1487                                    bool chained)
1488{
1489        struct sk_buff *first;
1490        struct htt_rx_desc *rxd;
1491        enum rx_msdu_decap_format decap;
1492
1493        first = skb_peek(amsdu);
1494        rxd = (void *)first->data - sizeof(*rxd);
1495        decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1496                   RX_MSDU_START_INFO1_DECAP_FORMAT);
1497
1498        if (!chained)
1499                return;
1500
1501        /* FIXME: The current unchaining logic can only handle the simple
1502         * case of raw msdu chaining. If decapping is anything other than
1503         * raw, the chaining may be more complex and isn't handled here;
1504         * don't even try reconstructing such frames - it'd be garbage.
1505         */
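            /* Only a raw chain whose length matches 1 + ring2_more_count
             * (the continuation-buffer count reported by the hardware) is
             * reconstructed; anything else is dropped.
             */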
1506        if (decap != RX_MSDU_DECAP_RAW ||
1507            skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1508                __skb_queue_purge(amsdu);
1509                return;
1510        }
1511
1512        ath10k_unchain_msdu(amsdu);
1513}
1514
1515static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1516                                        struct sk_buff_head *amsdu,
1517                                        struct ieee80211_rx_status *rx_status)
1518{
1519        struct sk_buff *msdu;
1520        struct htt_rx_desc *rxd;
1521        bool is_mgmt;
1522        bool has_fcs_err;
1523
1524        msdu = skb_peek(amsdu);
1525        rxd = (void *)msdu->data - sizeof(*rxd);
1526
1527        /* FIXME: It might be a good idea to do some fuzz testing to drop
1528         * invalid/dangerous frames.
1529         */
1530
1531        if (!rx_status->freq) {
1532                ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
1533                return false;
1534        }
1535
1536        is_mgmt = !!(rxd->attention.flags &
1537                     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1538        has_fcs_err = !!(rxd->attention.flags &
1539                         __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
1540
1541        /* Management frames are handled via WMI events. The advantage of
1542         * that approach is that the channel is explicitly provided in WMI
1543         * events, whereas HTT doesn't report the channel of received frames.
1544         *
1545         * However, some firmware revisions don't report corrupted frames
1546         * via WMI, so don't drop them.
1547         */
1548        if (is_mgmt && !has_fcs_err) {
1549                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
1550                return false;
1551        }
1552
1553        if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1554                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
1555                return false;
1556        }
1557
1558        return true;
1559}
1560
1561static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1562                                   struct sk_buff_head *amsdu,
1563                                   struct ieee80211_rx_status *rx_status)
1564{
1565        if (skb_queue_empty(amsdu))
1566                return;
1567
1568        if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
1569                return;
1570
1571        __skb_queue_purge(amsdu);
1572}
1573
1574static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
1575                                  struct htt_rx_indication *rx)
1576{
1577        struct ath10k *ar = htt->ar;
1578        struct ieee80211_rx_status *rx_status = &htt->rx_status;
1579        struct htt_rx_indication_mpdu_range *mpdu_ranges;
1580        struct sk_buff_head amsdu;
1581        int num_mpdu_ranges;
1582        int fw_desc_len;
1583        u8 *fw_desc;
1584        int i, ret, mpdu_count = 0;
1585
1586        lockdep_assert_held(&htt->rx_ring.lock);
1587
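            /* rx_confused is set once the rx ring is deemed corrupted; drop
             * all further indications rather than risk making things worse.
             */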
1588        if (htt->rx_confused)
1589                return;
1590
1591        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
1592        fw_desc = (u8 *)&rx->fw_desc;
1593
1594        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1595                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1596        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1597
1598        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
1599                        rx, sizeof(*rx) +
1600                        (sizeof(struct htt_rx_indication_mpdu_range) *
1601                                num_mpdu_ranges));
1602
1603        for (i = 0; i < num_mpdu_ranges; i++)
1604                mpdu_count += mpdu_ranges[i].mpdu_count;
1605
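            /* Pop and process one MPDU (possibly a chained A-MSDU) at a time. */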
1606        while (mpdu_count--) {
1607                __skb_queue_head_init(&amsdu);
1608                ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
1609                                              &fw_desc_len, &amsdu);
1610                if (ret < 0) {
1611                        ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1612                        __skb_queue_purge(&amsdu);
1613                        /* FIXME: It's probably a good idea to reboot the
1614                         * device instead of leaving it inoperable.
1615                         */
1616                        htt->rx_confused = true;
1617                        break;
1618                }
1619
1620                ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1621                ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
1622                ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1623                ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1624                ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1625        }
1626
1627        tasklet_schedule(&htt->rx_replenish_task);
1628}
1629
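    /* Fragmented frames are delivered by the target one MSDU at a time,
     * so exactly one MSDU is expected per fragment indication.
     */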
1630static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
1631                                       struct htt_rx_fragment_indication *frag)
1632{
1633        struct ath10k *ar = htt->ar;
1634        struct ieee80211_rx_status *rx_status = &htt->rx_status;
1635        struct sk_buff_head amsdu;
1636        int ret;
1637        u8 *fw_desc;
1638        int fw_desc_len;
1639
1640        fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
1641        fw_desc = (u8 *)frag->fw_msdu_rx_desc;
1642
1643        __skb_queue_head_init(&amsdu);
1644
1645        spin_lock_bh(&htt->rx_ring.lock);
1646        ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
1647                                      &amsdu);
1648        spin_unlock_bh(&htt->rx_ring.lock);
1649
1650        tasklet_schedule(&htt->rx_replenish_task);
1651
1652        ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
1653
1654        if (ret) {
1655                ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
1656                            ret);
1657                __skb_queue_purge(&amsdu);
1658                return;
1659        }
1660
1661        if (skb_queue_len(&amsdu) != 1) {
1662                ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
1663                __skb_queue_purge(&amsdu);
1664                return;
1665        }
1666
1667        ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1668        ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1669        ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
1670        ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1671
1672        if (fw_desc_len > 0) {
1673                ath10k_dbg(ar, ATH10K_DBG_HTT,
1674                           "expecting more fragmented rx in one indication %d\n",
1675                           fw_desc_len);
1676        }
1677}
1678
1679static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
1680                                       struct sk_buff *skb)
1681{
1682        struct ath10k_htt *htt = &ar->htt;
1683        struct htt_resp *resp = (struct htt_resp *)skb->data;
1684        struct htt_tx_done tx_done = {};
1685        int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1686        __le16 msdu_id;
1687        int i;
1688
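            /* Map the firmware tx completion status onto the generic
             * tx_done flags consumed by ath10k_txrx_tx_unref().
             */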
1689        switch (status) {
1690        case HTT_DATA_TX_STATUS_NO_ACK:
1691                tx_done.no_ack = true;
1692                break;
1693        case HTT_DATA_TX_STATUS_OK:
1694                tx_done.success = true;
1695                break;
1696        case HTT_DATA_TX_STATUS_DISCARD:
1697        case HTT_DATA_TX_STATUS_POSTPONE:
1698        case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1699                tx_done.discard = true;
1700                break;
1701        default:
1702                ath10k_warn(ar, "unhandled tx completion status %d\n", status);
1703                tx_done.discard = true;
1704                break;
1705        }
1706
1707        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1708                   resp->data_tx_completion.num_msdus);
1709
1710        for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1711                msdu_id = resp->data_tx_completion.msdus[i];
1712                tx_done.msdu_id = __le16_to_cpu(msdu_id);
1713                ath10k_txrx_tx_unref(htt, &tx_done);
1714        }
1715}
1716
1717static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1718{
1719        struct htt_rx_addba *ev = &resp->rx_addba;
1720        struct ath10k_peer *peer;
1721        struct ath10k_vif *arvif;
1722        u16 info0, tid, peer_id;
1723
1724        info0 = __le16_to_cpu(ev->info0);
1725        tid = MS(info0, HTT_RX_BA_INFO0_TID);
1726        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1727
1728        ath10k_dbg(ar, ATH10K_DBG_HTT,
1729                   "htt rx addba tid %hu peer_id %hu size %hhu\n",
1730                   tid, peer_id, ev->window_size);
1731
1732        spin_lock_bh(&ar->data_lock);
1733        peer = ath10k_peer_find_by_id(ar, peer_id);
1734        if (!peer) {
1735                ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
1736                            peer_id);
1737                spin_unlock_bh(&ar->data_lock);
1738                return;
1739        }
1740
1741        arvif = ath10k_get_arvif(ar, peer->vdev_id);
1742        if (!arvif) {
1743                ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
1744                            peer->vdev_id);
1745                spin_unlock_bh(&ar->data_lock);
1746                return;
1747        }
1748
1749        ath10k_dbg(ar, ATH10K_DBG_HTT,
1750                   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
1751                   peer->addr, tid, ev->window_size);
1752
1753        ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1754        spin_unlock_bh(&ar->data_lock);
1755}
1756
1757static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1758{
1759        struct htt_rx_delba *ev = &resp->rx_delba;
1760        struct ath10k_peer *peer;
1761        struct ath10k_vif *arvif;
1762        u16 info0, tid, peer_id;
1763
1764        info0 = __le16_to_cpu(ev->info0);
1765        tid = MS(info0, HTT_RX_BA_INFO0_TID);
1766        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
1767
1768        ath10k_dbg(ar, ATH10K_DBG_HTT,
1769                   "htt rx delba tid %hu peer_id %hu\n",
1770                   tid, peer_id);
1771
1772        spin_lock_bh(&ar->data_lock);
1773        peer = ath10k_peer_find_by_id(ar, peer_id);
1774        if (!peer) {
1775                ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
1776                            peer_id);
1777                spin_unlock_bh(&ar->data_lock);
1778                return;
1779        }
1780
1781        arvif = ath10k_get_arvif(ar, peer->vdev_id);
1782        if (!arvif) {
1783                ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
1784                            peer->vdev_id);
1785                spin_unlock_bh(&ar->data_lock);
1786                return;
1787        }
1788
1789        ath10k_dbg(ar, ATH10K_DBG_HTT,
1790                   "htt rx stop rx ba session sta %pM tid %hu\n",
1791                   peer->addr, tid);
1792
1793        ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
1794        spin_unlock_bh(&ar->data_lock);
1795}
1796
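    /* Move MSDUs from @list into @amsdu until a descriptor with the
     * LAST_MSDU bit is found. If the marker never shows up the MSDUs are
     * spliced back onto @list and -EAGAIN is returned.
     */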
1797static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1798                                       struct sk_buff_head *amsdu)
1799{
1800        struct sk_buff *msdu;
1801        struct htt_rx_desc *rxd;
1802
1803        if (skb_queue_empty(list))
1804                return -ENOBUFS;
1805
1806        if (WARN_ON(!skb_queue_empty(amsdu)))
1807                return -EINVAL;
1808
1809        while ((msdu = __skb_dequeue(list))) {
1810                __skb_queue_tail(amsdu, msdu);
1811
1812                rxd = (void *)msdu->data - sizeof(*rxd);
1813                if (rxd->msdu_end.common.info0 &
1814                    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
1815                        break;
1816        }
1817
1818        msdu = skb_peek_tail(amsdu);
1819        rxd = (void *)msdu->data - sizeof(*rxd);
1820        if (!(rxd->msdu_end.common.info0 &
1821              __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
1822                skb_queue_splice_init(amsdu, list);
1823                return -EAGAIN;
1824        }
1825
1826        return 0;
1827}
1828
1829static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
1830                                            struct sk_buff *skb)
1831{
1832        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1833
1834        if (!ieee80211_has_protected(hdr->frame_control))
1835                return;
1836
1837        /* Offloaded frames are already decrypted but firmware insists they are
1838         * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
1839         * will drop the frame.
1840         */
1841
1842        hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1843        status->flag |= RX_FLAG_DECRYPTED |
1844                        RX_FLAG_IV_STRIPPED |
1845                        RX_FLAG_MMIC_STRIPPED;
1846}
1847
1848static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
1849                                       struct sk_buff_head *list)
1850{
1851        struct ath10k_htt *htt = &ar->htt;
1852        struct ieee80211_rx_status *status = &htt->rx_status;
1853        struct htt_rx_offload_msdu *rx;
1854        struct sk_buff *msdu;
1855        size_t offset;
1856
1857        while ((msdu = __skb_dequeue(list))) {
1858                /* Offloaded frames don't have an Rx descriptor. Instead they
1859                 * have a short meta information header.
1860                 */
1861
1862                rx = (void *)msdu->data;
1863
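                    /* The put/pull pair accounts for the meta header bytes
                     * and then strips them so msdu->data points at the
                     * payload that follows.
                     */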
1864                skb_put(msdu, sizeof(*rx));
1865                skb_pull(msdu, sizeof(*rx));
1866
1867                if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
1868                        ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
1869                        dev_kfree_skb_any(msdu);
1870                        continue;
1871                }
1872
1873                skb_put(msdu, __le16_to_cpu(rx->msdu_len));
1874
1875                /* The offloaded rx header length isn't a multiple of 2 or
1876                 * 4, so the actual payload is unaligned. Align the frame,
1877                 * otherwise mac80211 complains. This shouldn't hurt
1878                 * performance much because offloaded frames are rare.
1879                 */
1880                offset = 4 - ((unsigned long)msdu->data & 3);
1881                skb_put(msdu, offset);
1882                memmove(msdu->data + offset, msdu->data, msdu->len);
1883                skb_pull(msdu, offset);
1884
1885                /* FIXME: The frame is NWifi. Re-construct QoS Control
1886                 * if possible later.
1887                 */
1888
1889                memset(status, 0, sizeof(*status));
1890                status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1891
1892                ath10k_htt_rx_h_rx_offload_prot(status, msdu);
1893                ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
1894                ath10k_process_rx(ar, status, msdu);
1895        }
1896}
1897
1898static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1899{
1900        struct ath10k_htt *htt = &ar->htt;
1901        struct htt_resp *resp = (void *)skb->data;
1902        struct ieee80211_rx_status *status = &htt->rx_status;
1903        struct sk_buff_head list;
1904        struct sk_buff_head amsdu;
1905        u16 peer_id;
1906        u16 msdu_count;
1907        u8 vdev_id;
1908        u8 tid;
1909        bool offload;
1910        bool frag;
1911        int ret;
1912
1913        lockdep_assert_held(&htt->rx_ring.lock);
1914
1915        if (htt->rx_confused)
1916                return;
1917
1918        skb_pull(skb, sizeof(resp->hdr));
1919        skb_pull(skb, sizeof(resp->rx_in_ord_ind));
1920
1921        peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
1922        msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
1923        vdev_id = resp->rx_in_ord_ind.vdev_id;
1924        tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
1925        offload = !!(resp->rx_in_ord_ind.info &
1926                        HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
1927        frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
1928
1929        ath10k_dbg(ar, ATH10K_DBG_HTT,
1930                   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
1931                   vdev_id, peer_id, tid, offload, frag, msdu_count);
1932
1933        if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
1934                ath10k_warn(ar, "dropping invalid in order rx indication\n");
1935                return;
1936        }
1937
1938        /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
1939         * extracted and processed.
1940         */
1941        __skb_queue_head_init(&list);
1942        ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
1943        if (ret < 0) {
1944                ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
1945                htt->rx_confused = true;
1946                return;
1947        }
1948
1949        /* Offloaded frames are very different and need to be handled
1950         * separately.
1951         */
1952        if (offload)
1953                ath10k_htt_rx_h_rx_offload(ar, &list);
1954
1955        while (!skb_queue_empty(&list)) {
1956                __skb_queue_head_init(&amsdu);
1957                ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
1958                switch (ret) {
1959                case 0:
1960                        /* Note: The in-order indication may report interleaved
1961                         * frames from different PPDUs meaning reported rx rate
1962                         * to mac80211 isn't accurate/reliable. It's still
1963                         * better to report something than nothing though. This
1964                         * should still give an idea about rx rate to the user.
1965                         */
1966                        ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
1967                        ath10k_htt_rx_h_filter(ar, &amsdu, status);
1968                        ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
1969                        ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1970                        break;
1971                case -EAGAIN:
1972                        /* fall through */
1973                default:
1974                        /* Should not happen. */
1975                        ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
1976                        htt->rx_confused = true;
1977                        __skb_queue_purge(&list);
1978                        return;
1979                }
1980        }
1981
1982        tasklet_schedule(&htt->rx_replenish_task);
1983}
1984
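    /* Dispatch a target-to-host HTT message. Rx and tx completion
     * indications are queued for the txrx tasklet, which then owns the
     * skb; everything else is handled inline and the skb is freed here.
     */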
1985void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1986{
1987        struct ath10k_htt *htt = &ar->htt;
1988        struct htt_resp *resp = (struct htt_resp *)skb->data;
1989        enum htt_t2h_msg_type type;
1990
1991        /* confirm alignment */
1992        if (!IS_ALIGNED((unsigned long)skb->data, 4))
1993                ath10k_warn(ar, "unaligned htt message, expect trouble\n");
1994
1995        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
1996                   resp->hdr.msg_type);
1997
1998        if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
1999                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
2000                           resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2001                dev_kfree_skb_any(skb);
2002                return;
2003        }
2004        type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2005
2006        switch (type) {
2007        case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2008                htt->target_version_major = resp->ver_resp.major;
2009                htt->target_version_minor = resp->ver_resp.minor;
2010                complete(&htt->target_version_received);
2011                break;
2012        }
2013        case HTT_T2H_MSG_TYPE_RX_IND:
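                    /* Ownership of the skb passes to the tasklet; don't free
                     * it here.
                     */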
2014                skb_queue_tail(&htt->rx_compl_q, skb);
2015                tasklet_schedule(&htt->txrx_compl_task);
2016                return;
2017        case HTT_T2H_MSG_TYPE_PEER_MAP: {
2018                struct htt_peer_map_event ev = {
2019                        .vdev_id = resp->peer_map.vdev_id,
2020                        .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2021                };
2022                memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2023                ath10k_peer_map_event(htt, &ev);
2024                break;
2025        }
2026        case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2027                struct htt_peer_unmap_event ev = {
2028                        .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2029                };
2030                ath10k_peer_unmap_event(htt, &ev);
2031                break;
2032        }
2033        case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2034                struct htt_tx_done tx_done = {};
2035                int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2036
2037                tx_done.msdu_id =
2038                        __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2039
2040                switch (status) {
2041                case HTT_MGMT_TX_STATUS_OK:
2042                        tx_done.success = true;
2043                        break;
2044                case HTT_MGMT_TX_STATUS_RETRY:
2045                        tx_done.no_ack = true;
2046                        break;
2047                case HTT_MGMT_TX_STATUS_DROP:
2048                        tx_done.discard = true;
2049                        break;
2050                }
2051
2052                ath10k_txrx_tx_unref(htt, &tx_done);
2053                break;
2054        }
2055        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
2056                skb_queue_tail(&htt->tx_compl_q, skb);
2057                tasklet_schedule(&htt->txrx_compl_task);
2058                return;
2059        case HTT_T2H_MSG_TYPE_SEC_IND: {
2060                struct ath10k *ar = htt->ar;
2061                struct htt_security_indication *ev = &resp->security_indication;
2062
2063                ath10k_dbg(ar, ATH10K_DBG_HTT,
2064                           "sec ind peer_id %d unicast %d type %d\n",
2065                           __le16_to_cpu(ev->peer_id),
2066                           !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2067                           MS(ev->flags, HTT_SECURITY_TYPE));
2068                complete(&ar->install_key_done);
2069                break;
2070        }
2071        case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
2072                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2073                                skb->data, skb->len);
2074                ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
2075                break;
2076        }
2077        case HTT_T2H_MSG_TYPE_TEST:
2078                break;
2079        case HTT_T2H_MSG_TYPE_STATS_CONF:
2080                trace_ath10k_htt_stats(ar, skb->data, skb->len);
2081                break;
2082        case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
2083                /* Firmware can return tx frames if it's unable to fully
2084                 * process them and suspects the host may be able to fix
2085                 * them. ath10k marks all tx frames as already inspected so
2086                 * this shouldn't happen unless the fw has a bug.
2087                 */
2088                ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
2089                break;
2090        case HTT_T2H_MSG_TYPE_RX_ADDBA:
2091                ath10k_htt_rx_addba(ar, resp);
2092                break;
2093        case HTT_T2H_MSG_TYPE_RX_DELBA:
2094                ath10k_htt_rx_delba(ar, resp);
2095                break;
2096        case HTT_T2H_MSG_TYPE_PKTLOG: {
2097                struct ath10k_pktlog_hdr *hdr =
2098                        (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
2099
2100                trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2101                                        sizeof(*hdr) +
2102                                        __le16_to_cpu(hdr->size));
2103                break;
2104        }
2105        case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2106                /* Ignore this event because mac80211 takes care of Rx
2107                 * aggregation reordering.
2108                 */
2109                break;
2110        }
2111        case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2112                skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2113                tasklet_schedule(&htt->txrx_compl_task);
2114                return;
2115        }
2116        case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2117                break;
2118        case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
2119                break;
2120        case HTT_T2H_MSG_TYPE_AGGR_CONF:
2121                break;
2122        case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
2123        case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
2124        case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
2125                /* TODO: Implement pull-push logic */
2126                break;
2127        case HTT_T2H_MSG_TYPE_EN_STATS:
2128        default:
2129                ath10k_warn(ar, "htt event (%d) not handled\n",
2130                            resp->hdr.msg_type);
2131                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2132                                skb->data, skb->len);
2133                break;
2134        }
2135
2136        /* Free the indication buffer */
2137        dev_kfree_skb_any(skb);
2138}
2139EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
2140
2141void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2142                                             struct sk_buff *skb)
2143{
2144        trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
2145        dev_kfree_skb_any(skb);
2146}
2147EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2148
2149static void ath10k_htt_txrx_compl_task(unsigned long ptr)
2150{
2151        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
2152        struct ath10k *ar = htt->ar;
2153        struct sk_buff_head tx_q;
2154        struct sk_buff_head rx_q;
2155        struct sk_buff_head rx_ind_q;
2156        struct htt_resp *resp;
2157        struct sk_buff *skb;
2158        unsigned long flags;
2159
2160        __skb_queue_head_init(&tx_q);
2161        __skb_queue_head_init(&rx_q);
2162        __skb_queue_head_init(&rx_ind_q);
2163
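            /* Splice the shared queues under their locks so the bulk of the
             * processing below runs without the queue locks held.
             */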
2164        spin_lock_irqsave(&htt->tx_compl_q.lock, flags);
2165        skb_queue_splice_init(&htt->tx_compl_q, &tx_q);
2166        spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags);
2167
2168        spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
2169        skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
2170        spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);
2171
2172        spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
2173        skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
2174        spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
2175
2176        while ((skb = __skb_dequeue(&tx_q))) {
2177                ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
2178                dev_kfree_skb_any(skb);
2179        }
2180
2181        while ((skb = __skb_dequeue(&rx_q))) {
2182                resp = (struct htt_resp *)skb->data;
2183                spin_lock_bh(&htt->rx_ring.lock);
2184                ath10k_htt_rx_handler(htt, &resp->rx_ind);
2185                spin_unlock_bh(&htt->rx_ring.lock);
2186                dev_kfree_skb_any(skb);
2187        }
2188
2189        while ((skb = __skb_dequeue(&rx_ind_q))) {
2190                spin_lock_bh(&htt->rx_ring.lock);
2191                ath10k_htt_rx_in_ord_ind(ar, skb);
2192                spin_unlock_bh(&htt->rx_ring.lock);
2193                dev_kfree_skb_any(skb);
2194        }
2195}
2196