linux/drivers/net/wireless/ath/ath10k/htt_rx.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
        int size;

        /*
         * It is expected that the host CPU will typically be able to
         * service the rx indication from one A-MPDU before the rx
         * indication from the subsequent A-MPDU happens, roughly 1-2 ms
         * later. However, the rx ring should be sized very conservatively,
         * to accommodate the worst reasonable delay before the host CPU
         * services an rx indication interrupt.
         *
         * The rx ring need not be kept full of empty buffers. In theory,
         * the htt host SW can dynamically track the low-water mark in the
         * rx ring, and dynamically adjust the level to which the rx ring
         * is filled with empty buffers, to dynamically meet the desired
         * low-water mark.
         *
         * In contrast, it's difficult to resize the rx ring itself, once
         * it's in use. Thus, the ring itself should be sized very
         * conservatively, while the degree to which the ring is filled
         * with empty buffers should be sized moderately conservatively.
         */

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size =
            htt->max_throughput_mbps *
            1000 /
            (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

        if (size < HTT_RX_RING_SIZE_MIN)
                size = HTT_RX_RING_SIZE_MIN;

        if (size > HTT_RX_RING_SIZE_MAX)
                size = HTT_RX_RING_SIZE_MAX;

        size = roundup_pow_of_two(size);

        return size;
}
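
/*
 * Worked example for the sizing formula above (illustrative only; the
 * throughput figure is hypothetical). With max_throughput_mbps = 800,
 * integer arithmetic evaluates left to right:
 *
 *      size = 800 * 1000 / (8 * 1000) * 20
 *           = 800000 / 8000 * 20
 *           = 100 * 20 = 2000
 *
 * i.e. 2000 average-sized frames can arrive during 20 ms of host latency
 * at 800 Mbps. That falls inside [HTT_RX_RING_SIZE_MIN,
 * HTT_RX_RING_SIZE_MAX] and roundup_pow_of_two() then yields a ring of
 * 2048 entries. Note that operand order matters here: computing
 * 1000 / (8 * HTT_RX_AVG_FRM_BYTES) first would truncate to zero.
 */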

static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
        int size;

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size =
            htt->max_throughput_mbps *
            1000 /
            (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

        /*
         * Make sure the fill level is at least 1 less than the ring size.
         * Leaving 1 element empty allows the SW to easily distinguish
         * between a full ring vs. an empty ring.
         */
        if (size >= htt->rx_ring.size)
                size = htt->rx_ring.size - 1;

        return size;
}
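
/*
 * A short sketch of why one slot stays empty. With just a producer index
 * (the alloc index, advanced by the host) and a consumer index (the sw
 * read index), "producer == consumer" must mean "ring empty". If all
 * size slots could be occupied, a completely full ring would present the
 * same equality and the two states would be indistinguishable. Keeping
 * the fill level at size - 1 preserves the usual ring-buffer invariants:
 *
 *      empty: alloc == sw_rd
 *      full:  ((alloc + 1) & size_mask) == sw_rd
 *      used:  (alloc - sw_rd) & size_mask
 */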

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_cb *cb;
        int i;

        for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
                skb = htt->rx_ring.netbufs_ring[i];
                cb = ATH10K_SKB_CB(skb);
                dma_unmap_single(htt->ar->dev, cb->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }

        htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                ATH10K_SKB_CB(skb)->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
                htt->rx_ring.fill_cnt++;

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
        return ret;
}
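
/*
 * A note on the publish order above (a sketch of the protocol as
 * implemented here, not an authoritative HW description):
 * alloc_idx.vaddr points into DMA-coherent memory that the target reads
 * to learn how far the host has filled the ring. Each slot's buffer
 * address is linked in first, and only after the loop finishes (or
 * fails) is the index published at the "fail:" label, so the target
 * never observes an index covering a slot whose paddr is not yet valid:
 *
 *      htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);  // slot first
 *      ...
 *      *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);  // index last
 */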

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_to_fill;

        spin_lock_bh(&htt->rx_ring.lock);
        num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)arg;
        ath10k_htt_rx_msdu_buff_replenish(htt);
}

static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
        return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
                htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}
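
/*
 * Worked example for the masked subtraction above (illustrative): with
 * size = 2048 (size_mask = 0x7ff), an alloc index of 3 and a sw read
 * index of 2045, the producer has already wrapped past the end of the
 * ring:
 *
 *      (3 - 2045) & 0x7ff  =  -2042 & 0x7ff  =  6
 *
 * so six filled elements are pending, computed correctly even though the
 * producer index is numerically smaller than the consumer index. This is
 * also why the ring size must be a power of two: the mask only
 * substitutes for a modulo under that condition.
 */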

void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
        int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

        del_timer_sync(&htt->rx_ring.refill_retry_timer);

        while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
                struct sk_buff *skb =
                                htt->rx_ring.netbufs_ring[sw_rd_idx];
                struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

                dma_unmap_single(htt->ar->dev, cb->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
                sw_rd_idx++;
                sw_rd_idx &= htt->rx_ring.size_mask;
        }

        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        int idx;
        struct sk_buff *msdu;

        spin_lock_bh(&htt->rx_ring.lock);

        if (ath10k_htt_rx_ring_elems(htt) == 0)
                ath10k_warn("htt rx ring is empty!\n");

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        spin_unlock_bh(&htt->rx_ring.lock);
        return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
        struct sk_buff *next;

        while (skb) {
                next = skb->next;
                dev_kfree_skb_any(skb);
                skb = next;
        }
}

static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   u8 **fw_desc, int *fw_desc_len,
                                   struct sk_buff **head_msdu,
                                   struct sk_buff **tail_msdu)
{
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        if (ath10k_htt_rx_ring_elems(htt) == 0)
                ath10k_warn("htt rx ring is empty!\n");

        if (htt->rx_confused) {
                ath10k_warn("htt is confused. refusing rx\n");
                return 0;
        }

        msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
        while (msdu) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                dma_unmap_single(htt->ar->dev,
                                 ATH10K_SKB_CB(msdu)->paddr,
                                 msdu->len + skb_tailroom(msdu),
                                 DMA_FROM_DEVICE);

                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
                                msdu->data, msdu->len + skb_tailroom(msdu));

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report msdu payload since this is what caller
                 *        expects now */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                                & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        ath10k_htt_rx_free_msdu_chain(*head_msdu);
                        *head_msdu = NULL;
                        msdu = NULL;
                        ath10k_err("htt rx stopped. cannot recover\n");
                        htt->rx_confused = true;
                        break;
                }

                /*
                 * Copy the FW rx descriptor for this MSDU from the rx
                 * indication message into the MSDU's netbuf. HL uses the
                 * same rx indication message definition as LL, and simply
                 * appends new info (fields from the HW rx desc, and the
                 * MSDU payload itself). So, the offset into the rx
                 * indication message only has to account for the standard
                 * offset of the per-MSDU FW rx desc info within the
                 * message, and how many bytes of the per-MSDU FW rx desc
                 * info have already been consumed. (And the endianness of
                 * the host, since for a big-endian host, the rx ind
                 * message contents, including the per-MSDU rx desc bytes,
                 * were byteswapped during upload.)
                 */
                if (*fw_desc_len > 0) {
                        rx_desc->fw_desc.info0 = **fw_desc;
                        /*
                         * The target is expected to only provide the basic
                         * per-MSDU rx descriptors. Just to be sure, verify
                         * that the target has not attached extension data
                         * (e.g. LRO flow ID).
                         */

                        /* or more, if there's extension data */
                        (*fw_desc)++;
                        (*fw_desc_len)--;
                } else {
                        /*
                         * When an oversized A-MSDU happens, the FW will
                         * lose some of the MSDU status - in this case, the
                         * FW descriptors provided will be fewer than the
                         * actual number of MSDUs inside this MPDU. Mark the
                         * FW descriptors so that the MPDU is still delivered
                         * to the upper stack, provided it has no CRC error.
                         *
                         * FIX THIS - the FW descriptors are actually for
                         * MSDUs at the end of this A-MSDU instead of the
                         * beginning.
                         */
                        rx_desc->fw_desc.info0 = 0;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;

                /* FIXME: Do chained buffers include htt_rx_desc or not? */
                while (msdu_chained--) {
                        struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

                        dma_unmap_single(htt->ar->dev,
                                         ATH10K_SKB_CB(next)->paddr,
                                         next->len + skb_tailroom(next),
                                         DMA_FROM_DEVICE);

                        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
                                        next->data,
                                        next->len + skb_tailroom(next));

                        skb_trim(next, 0);
                        skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= next->len;

                        msdu->next = next;
                        msdu = next;
                        msdu_chaining = 1;
                }

                if (msdu_len > 0) {
                        /* This may suggest a FW bug */
                        ath10k_warn("htt rx msdu len not consumed (%d)\n",
                                    msdu_len);
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;

                if (last_msdu) {
                        msdu->next = NULL;
                        break;
                } else {
                        struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
                        msdu->next = next;
                        msdu = next;
                }
        }
        *tail_msdu = msdu;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}

int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
        dma_addr_t paddr;
        void *vaddr;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn("htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.size_mask = htt->rx_ring.size - 1;

        /*
         * Set the initial value for the level to which the rx ring
         * should be filled, based on the max throughput and the
         * worst likely latency for the host to fill the rx ring
         * with new buffers. In theory, this fill level can be
         * dynamically adjusted from the initial value set here, to
         * reflect the actual host latency rather than a
         * conservative assumption about the host latency.
         */
        htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

        htt->rx_ring.netbufs_ring =
                kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                   (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
                   &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_ring;

        htt->rx_ring.paddrs_ring = vaddr;
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
                goto err_fill_ring;

        ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_fill_ring:
        ath10k_htt_rx_ring_free(htt);
        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);
err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
        return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return 4;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
        case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return 8;
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        }

        ath10k_warn("unknown encryption type %d\n", type);
        return 0;
}

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return 4;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return 8;
        }

        ath10k_warn("unknown encryption type %d\n", type);
        return 0;
}
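
/*
 * A sketch of how the two lookup tables above combine on a received
 * frame (layout per the 802.11 spec; TKIP chosen as the example):
 *
 *      [802.11 hdr][IV + ext IV: 8][payload][MIC: 8][ICV: 4][FCS: 4]
 *                   ^ param_len                     ^ tail_len
 *
 * ath10k_htt_rx_crypto_param_len() reports the security header placed
 * between the 802.11 header and the payload (8 for TKIP), while
 * ath10k_htt_rx_crypto_tail_len() reports the trailer that must be
 * trimmed (4 for the TKIP ICV; the 8-byte Michael MIC is handled
 * separately for the last fragment). ath10k_htt_rx_frag_handler() below
 * uses both helpers exactly this way.
 */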

/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                        RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (fmt == RX_MSDU_DECAP_RAW)
                return (void *)skb->data;
        else
                return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                if (qc[0] & 0x80)
                        return true;
        }
        return false;
}
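
/*
 * The 0x80 test above checks bit 7 of the first QoS Control byte, which
 * is the A-MSDU Present bit defined by 802.11n. mac80211 exposes the
 * same bit as IEEE80211_QOS_CTL_A_MSDU_PRESENT (0x0080), so an
 * equivalent and arguably more self-documenting form would be:
 *
 *      return !!(qc[0] & IEEE80211_QOS_CTL_A_MSDU_PRESENT);
 */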

static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
                        struct htt_rx_info *info)
{
        struct htt_rx_desc *rxd;
        struct sk_buff *amsdu;
        struct sk_buff *first;
        struct ieee80211_hdr *hdr;
        struct sk_buff *skb = info->skb;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
        unsigned int hdr_len;
        int crypto_len;

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                        RX_MSDU_START_INFO1_DECAP_FORMAT);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                        RX_MPDU_START_INFO0_ENCRYPT_TYPE);

        /* FIXME: No idea what assumptions are safe here. Need logs */
        if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
            (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
                ath10k_htt_rx_free_msdu_chain(skb->next);
                skb->next = NULL;
                return -ENOTSUPP;
        }

        /* A-MSDU max is a little less than 8K */
        amsdu = dev_alloc_skb(8*1024);
        if (!amsdu) {
                ath10k_warn("A-MSDU allocation failed\n");
                ath10k_htt_rx_free_msdu_chain(skb->next);
                skb->next = NULL;
                return -ENOMEM;
        }

        if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
                int hdrlen;

                hdr = (void *)rxd->rx_hdr_status;
                hdrlen = ieee80211_hdrlen(hdr->frame_control);
                memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
        }

        first = skb;
        while (skb) {
                void *decap_hdr;
                int decap_len = 0;

                rxd = (void *)skb->data - sizeof(*rxd);
                fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                                RX_MSDU_START_INFO1_DECAP_FORMAT);
                decap_hdr = (void *)rxd->rx_hdr_status;

                if (skb == first) {
                        /* We receive linked A-MSDU subframe skbuffs. The
                         * first one contains the original 802.11 header (and
                         * possible crypto param) in the RX descriptor. The
                         * A-MSDU subframe header follows that. Each part is
                         * aligned to 4 byte boundary. */

                        hdr = (void *)amsdu->data;
                        hdr_len = ieee80211_hdrlen(hdr->frame_control);
                        crypto_len = ath10k_htt_rx_crypto_param_len(enctype);

                        decap_hdr += roundup(hdr_len, 4);
                        decap_hdr += roundup(crypto_len, 4);
                }

                if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
                        /* Ethernet2 decap inserts ethernet header in place of
                         * A-MSDU subframe header. */
                        skb_pull(skb, 6 + 6 + 2);

                        /* A-MSDU subframe header length */
                        decap_len += 6 + 6 + 2;

                        /* Ethernet2 decap also strips the LLC/SNAP so we need
                         * to re-insert it. The LLC/SNAP follows A-MSDU
                         * subframe header. */
                        /* FIXME: Not all LLCs are 8 bytes long */
                        decap_len += 8;

                        memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
                }

                if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
                        /* Native Wifi decap inserts regular 802.11 header
                         * in place of A-MSDU subframe header. */
                        hdr = (struct ieee80211_hdr *)skb->data;
                        skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));

                        /* A-MSDU subframe header length */
                        decap_len += 6 + 6 + 2;

                        memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
                }

                if (fmt == RX_MSDU_DECAP_RAW)
                        skb_trim(skb, skb->len - 4); /* remove FCS */

                memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);

                /* A-MSDU subframes are padded to 4 bytes,
                 * but relative to the first subframe, not the whole MPDU */
                if (skb->next && ((decap_len + skb->len) & 3)) {
                        int padlen = 4 - ((decap_len + skb->len) & 3);
                        memset(skb_put(amsdu, padlen), 0, padlen);
                }

                skb = skb->next;
        }

        info->skb = amsdu;
        info->encrypt_type = enctype;

        ath10k_htt_rx_free_msdu_chain(first);

        return 0;
}
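
/*
 * For reference, the A-MSDU subframe format being reassembled above
 * (lengths in bytes, per 802.11):
 *
 *      [DA: 6][SA: 6][Length: 2][LLC/SNAP: 8][MSDU payload][pad to 4]
 *
 * This is why the decap paths count "6 + 6 + 2" for the subframe header
 * plus 8 more for the LLC/SNAP that ethernet2 decap strips. The trailing
 * pad exists on every subframe except the last and - as the loop comment
 * notes - is computed relative to the first subframe of this rx chain,
 * not the start of the MPDU.
 */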

static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
        struct sk_buff *skb = info->skb;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;

        /* This shouldn't happen. If it does then it may be a FW bug. */
        if (skb->next) {
                ath10k_warn("received chained non A-MSDU frame\n");
                ath10k_htt_rx_free_msdu_chain(skb->next);
                skb->next = NULL;
        }

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                        RX_MSDU_START_INFO1_DECAP_FORMAT);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                        RX_MPDU_START_INFO0_ENCRYPT_TYPE);
        hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;

        switch (fmt) {
        case RX_MSDU_DECAP_RAW:
                /* remove trailing FCS */
                skb_trim(skb, skb->len - 4);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
                /* nothing to do here */
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
                /* macaddr[6] + macaddr[6] + ethertype[2] */
                skb_pull(skb, 6 + 6 + 2);
                break;
        case RX_MSDU_DECAP_8023_SNAP_LLC:
                /* macaddr[6] + macaddr[6] + len[2] */
                /* we don't need this for non-A-MSDU */
                skb_pull(skb, 6 + 6 + 2);
                break;
        }

        if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
                void *llc;
                int llclen;

                llclen = 8;
                llc  = hdr;
                llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
                llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

                skb_push(skb, llclen);
                memcpy(skb->data, llc, llclen);
        }

        if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
                int len = ieee80211_hdrlen(hdr->frame_control);
                skb_push(skb, len);
                memcpy(skb->data, hdr, len);
        }

        info->skb = skb;
        info->encrypt_type = enctype;
        return 0;
}

static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);

        if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
                return true;

        return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);

        if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
                return true;

        return false;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
{
        struct htt_rx_info info;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
        struct ieee80211_hdr *hdr;
        int num_mpdu_ranges;
        int fw_desc_len;
        u8 *fw_desc;
        int i, j;
        int ret;

        memset(&info, 0, sizeof(info));

        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
        fw_desc = (u8 *)&rx->fw_desc;

        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
                        rx, sizeof(*rx) +
                        (sizeof(struct htt_rx_indication_mpdu_range) *
                                num_mpdu_ranges));

        for (i = 0; i < num_mpdu_ranges; i++) {
                info.status = mpdu_ranges[i].mpdu_range_status;

                for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
                        struct sk_buff *msdu_head, *msdu_tail;
                        enum htt_rx_mpdu_status status;
                        int msdu_chaining;

                        msdu_head = NULL;
                        msdu_tail = NULL;
                        msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
                                                         &fw_desc,
                                                         &fw_desc_len,
                                                         &msdu_head,
                                                         &msdu_tail);

                        if (!msdu_head) {
                                ath10k_warn("htt rx no data!\n");
                                continue;
                        }

                        if (msdu_head->len == 0) {
                                ath10k_dbg(ATH10K_DBG_HTT,
                                           "htt rx dropping due to zero-len\n");
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        status = info.status;

                        /* Skip mgmt frames since we handle them in WMI */
                        if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        if (status != HTT_RX_IND_MPDU_STATUS_OK &&
                            status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
                            !htt->ar->monitor_enabled) {
                                ath10k_dbg(ATH10K_DBG_HTT,
                                           "htt rx ignoring frame w/ status %d\n",
                                           status);
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        /* FIXME: we do not support chaining yet.
                         * this needs investigation */
                        if (msdu_chaining) {
                                ath10k_warn("msdu_chaining is true\n");
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        info.skb     = msdu_head;
                        info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
                        info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
                        info.signal += rx->ppdu.combined_rssi;

                        info.rate.info0 = rx->ppdu.info0;
                        info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
                        info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);

                        hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

                        if (ath10k_htt_rx_hdr_is_amsdu(hdr))
                                ret = ath10k_htt_rx_amsdu(htt, &info);
                        else
                                ret = ath10k_htt_rx_msdu(htt, &info);

                        if (ret && !info.fcs_err) {
                                ath10k_warn("error processing msdus %d\n", ret);
                                dev_kfree_skb_any(info.skb);
                                continue;
                        }

                        if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
                                ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");

                        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
                                        info.skb->data, info.skb->len);
                        ath10k_process_rx(htt->ar, &info);
                }
        }

        ath10k_htt_rx_msdu_buff_replenish(htt);
}
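
/*
 * Shape of the rx indication walked above, as a sketch derived from the
 * htt.h definitions used in this function:
 *
 *      htt_rx_indication
 *      +- prefix.fw_rx_desc_bytes      (length of the fw_desc blob)
 *      +- ppdu                         (combined_rssi, rate info0..2)
 *      +- hdr.info1                    (NUM_MPDU_RANGES)
 *      +- mpdu_ranges[]
 *         +- mpdu_range_status
 *         +- mpdu_count                -> that many amsdu_pop() calls
 *
 * Every MPDU in a range is popped from the rx ring as a head..tail MSDU
 * chain and either dropped (bad status, decrypt error, zero length) or
 * decapped and handed to ath10k_process_rx().
 */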

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                                struct htt_rx_fragment_indication *frag)
{
        struct sk_buff *msdu_head, *msdu_tail;
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;
        struct htt_rx_info info = {};
        struct ieee80211_hdr *hdr;
        int msdu_chaining;
        bool tkip_mic_err;
        bool decrypt_err;
        u8 *fw_desc;
        int fw_desc_len, hdrlen, paramlen;
        int trim;

        fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
        fw_desc = (u8 *)frag->fw_msdu_rx_desc;

        msdu_head = NULL;
        msdu_tail = NULL;
        msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
                                                &msdu_head, &msdu_tail);

        ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

        if (!msdu_head) {
                ath10k_warn("htt rx frag no data\n");
                return;
        }

        if (msdu_chaining || msdu_head != msdu_tail) {
                ath10k_warn("aggregation with fragmentation?!\n");
                ath10k_htt_rx_free_msdu_chain(msdu_head);
                return;
        }

        /* FIXME: implement signal strength */

        hdr = (struct ieee80211_hdr *)msdu_head->data;
        rxd = (void *)msdu_head->data - sizeof(*rxd);
        tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
                                RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
        decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
                                RX_ATTENTION_FLAGS_DECRYPT_ERR);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                        RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (fmt != RX_MSDU_DECAP_RAW) {
                ath10k_warn("we don't support non-raw fragmented rx yet\n");
                dev_kfree_skb_any(msdu_head);
                goto end;
        }

        info.skb = msdu_head;
        info.status = HTT_RX_IND_MPDU_STATUS_OK;
        info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                                RX_MPDU_START_INFO0_ENCRYPT_TYPE);

        if (tkip_mic_err) {
                ath10k_warn("tkip mic error\n");
                info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
        }

        if (decrypt_err) {
                ath10k_warn("decryption err in fragmented rx\n");
                dev_kfree_skb_any(info.skb);
                goto end;
        }

        if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
                hdrlen = ieee80211_hdrlen(hdr->frame_control);
                paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

                /* It is more efficient to move the header than the payload */
                memmove((void *)info.skb->data + paramlen,
                        (void *)info.skb->data,
                        hdrlen);
                skb_pull(info.skb, paramlen);
                hdr = (struct ieee80211_hdr *)info.skb->data;
        }

        /* remove trailing FCS */
        trim  = 4;

        /* remove crypto trailer */
        trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

        /* last fragment of TKIP frags has MIC */
        if (!ieee80211_has_morefrags(hdr->frame_control) &&
            info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                trim += 8;

        if (trim > info.skb->len) {
                ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
                dev_kfree_skb_any(info.skb);
                goto end;
        }

        skb_trim(info.skb, info.skb->len - trim);

        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
                        info.skb->data, info.skb->len);
        ath10k_process_rx(htt->ar, &info);

end:
        if (fw_desc_len > 0) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "expecting more fragmented rx in one indication %d\n",
                           fw_desc_len);
        }
}
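
/*
 * Worked trim example for the fragment path above (illustrative): for
 * the final fragment of a TKIP-protected MPDU (no morefrags bit,
 * HTT_RX_MPDU_ENCRYPT_TKIP_WPA):
 *
 *      trim  = 4       FCS
 *      trim += 4       crypto tail (TKIP ICV)
 *      trim += 8       Michael MIC, last fragment only
 *            = 16      bytes removed from the tail
 *
 * while the 8-byte IV/ext-IV at the head was already consumed by the
 * memmove() + skb_pull() pair that slid the 802.11 header forward over
 * the crypto parameters.
 */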

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;

        /* confirm alignment */
        if (!IS_ALIGNED((unsigned long)skb->data, 4))
                ath10k_warn("unaligned htt message, expect trouble\n");

        ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
                   resp->hdr.msg_type);
        switch (resp->hdr.msg_type) {
        case HTT_T2H_MSG_TYPE_VERSION_CONF: {
                htt->target_version_major = resp->ver_resp.major;
                htt->target_version_minor = resp->ver_resp.minor;
                complete(&htt->target_version_received);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IND: {
                ath10k_htt_rx_handler(htt, &resp->rx_ind);
                break;
        }
        case HTT_T2H_MSG_TYPE_PEER_MAP: {
                struct htt_peer_map_event ev = {
                        .vdev_id = resp->peer_map.vdev_id,
                        .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
                };
                memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
                ath10k_peer_map_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
                struct htt_peer_unmap_event ev = {
                        .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
                };
                ath10k_peer_unmap_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
                struct htt_tx_done tx_done = {};
                int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

                tx_done.msdu_id =
                        __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

                switch (status) {
                case HTT_MGMT_TX_STATUS_OK:
                        break;
                case HTT_MGMT_TX_STATUS_RETRY:
                        tx_done.no_ack = true;
                        break;
                case HTT_MGMT_TX_STATUS_DROP:
                        tx_done.discard = true;
                        break;
                }

                ath10k_txrx_tx_completed(htt, &tx_done);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
                struct htt_tx_done tx_done = {};
                int status = MS(resp->data_tx_completion.flags,
                                HTT_DATA_TX_STATUS);
                __le16 msdu_id;
                int i;

                switch (status) {
                case HTT_DATA_TX_STATUS_NO_ACK:
                        tx_done.no_ack = true;
                        break;
                case HTT_DATA_TX_STATUS_OK:
                        break;
                case HTT_DATA_TX_STATUS_DISCARD:
                case HTT_DATA_TX_STATUS_POSTPONE:
                case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
                        tx_done.discard = true;
                        break;
                default:
                        ath10k_warn("unhandled tx completion status %d\n",
                                    status);
                        tx_done.discard = true;
                        break;
                }

                ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
                           resp->data_tx_completion.num_msdus);

                for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
                        msdu_id = resp->data_tx_completion.msdus[i];
                        tx_done.msdu_id = __le16_to_cpu(msdu_id);
                        ath10k_txrx_tx_completed(htt, &tx_done);
                }
                break;
        }
        case HTT_T2H_MSG_TYPE_SEC_IND: {
                struct ath10k *ar = htt->ar;
                struct htt_security_indication *ev = &resp->security_indication;

                ath10k_dbg(ATH10K_DBG_HTT,
                           "sec ind peer_id %d unicast %d type %d\n",
                          __le16_to_cpu(ev->peer_id),
                          !!(ev->flags & HTT_SECURITY_IS_UNICAST),
                          MS(ev->flags, HTT_SECURITY_TYPE));
                complete(&ar->install_key_done);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
                break;
        }
        case HTT_T2H_MSG_TYPE_TEST:
                /* FIX THIS */
                break;
        case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
        case HTT_T2H_MSG_TYPE_STATS_CONF:
        case HTT_T2H_MSG_TYPE_RX_ADDBA:
        case HTT_T2H_MSG_TYPE_RX_DELBA:
        case HTT_T2H_MSG_TYPE_RX_FLUSH:
        default:
                ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
                           resp->hdr.msg_type);
                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                break;
        }

        /* Free the indication buffer */
        dev_kfree_skb_any(skb);
}
1168