/* linux/drivers/net/wireless/ath/ath11k/dp_rx.c */
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 */
   5
   6#include <linux/ieee80211.h>
   7#include <linux/kernel.h>
   8#include <linux/skbuff.h>
   9#include <crypto/hash.h>
  10#include "core.h"
  11#include "debug.h"
  12#include "debugfs_htt_stats.h"
  13#include "debugfs_sta.h"
  14#include "hal_desc.h"
  15#include "hw.h"
  16#include "dp_rx.h"
  17#include "hal_rx.h"
  18#include "dp_tx.h"
  19#include "peer.h"
  20
  21#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
  22
  23static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
  24{
  25        return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
  26}
  27
  28static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
  29                                                               struct hal_rx_desc *desc)
  30{
  31        if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
  32                return HAL_ENCRYPT_TYPE_OPEN;
  33
  34        return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
  35}
  36
  37static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
  38                                               struct hal_rx_desc *desc)
  39{
  40        return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
  41}
  42
  43static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
  44                                                     struct hal_rx_desc *desc)
  45{
  46        return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
  47}
  48
  49static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
  50                                                     struct hal_rx_desc *desc)
  51{
  52        return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
  53}
  54
  55static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
  56                                               struct hal_rx_desc *desc)
  57{
  58        return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
  59}
  60
  61static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
  62                                                 struct sk_buff *skb)
  63{
  64        struct ieee80211_hdr *hdr;
  65
  66        hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
  67        return ieee80211_has_morefrags(hdr->frame_control);
  68}
  69
  70static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
  71                                             struct sk_buff *skb)
  72{
  73        struct ieee80211_hdr *hdr;
  74
  75        hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
  76        return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
  77}
  78
  79static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
  80                                            struct hal_rx_desc *desc)
  81{
  82        return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
  83}
  84
  85static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
  86                                        struct hal_rx_desc *desc)
  87{
  88        return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
  89}
  90
  91static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
  92{
  93        return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
  94                           __le32_to_cpu(attn->info2));
  95}
  96
  97static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
  98{
  99        return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
 100                           __le32_to_cpu(attn->info1));
 101}
 102
 103static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
 104{
 105        return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
 106                           __le32_to_cpu(attn->info1));
 107}
 108
 109static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
 110{
 111        return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
 112                          __le32_to_cpu(attn->info2)) ==
 113                RX_DESC_DECRYPT_STATUS_CODE_OK);
 114}
 115
 116static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
 117{
 118        u32 info = __le32_to_cpu(attn->info1);
 119        u32 errmap = 0;
 120
 121        if (info & RX_ATTENTION_INFO1_FCS_ERR)
 122                errmap |= DP_RX_MPDU_ERR_FCS;
 123
 124        if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
 125                errmap |= DP_RX_MPDU_ERR_DECRYPT;
 126
 127        if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
 128                errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
 129
 130        if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
 131                errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
 132
 133        if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
 134                errmap |= DP_RX_MPDU_ERR_OVERFLOW;
 135
 136        if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
 137                errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
 138
 139        if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
 140                errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
 141
 142        return errmap;
 143}
 144
 145static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
 146                                              struct hal_rx_desc *desc)
 147{
 148        return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
 149}
 150
 151static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
 152                                        struct hal_rx_desc *desc)
 153{
 154        return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
 155}
 156
 157static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
 158                                             struct hal_rx_desc *desc)
 159{
 160        return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
 161}
 162
 163static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
 164                                          struct hal_rx_desc *desc)
 165{
 166        return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
 167}
 168
 169static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
 170                                          struct hal_rx_desc *desc)
 171{
 172        return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
 173}
 174
 175static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
 176                                             struct hal_rx_desc *desc)
 177{
 178        return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
 179}
 180
 181static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
 182                                        struct hal_rx_desc *desc)
 183{
 184        return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
 185}
 186
 187static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
 188                                        struct hal_rx_desc *desc)
 189{
 190        return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
 191}
 192
 193static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
 194                                             struct hal_rx_desc *desc)
 195{
 196        return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
 197}
 198
 199static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
 200                                        struct hal_rx_desc *desc)
 201{
 202        return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
 203}
 204
 205static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
 206                                               struct hal_rx_desc *desc)
 207{
 208        return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
 209}
 210
 211static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
 212                                              struct hal_rx_desc *desc)
 213{
 214        return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
 215}
 216
 217static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
 218                                           struct hal_rx_desc *fdesc,
 219                                           struct hal_rx_desc *ldesc)
 220{
 221        ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
 222}
 223
 224static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
 225{
 226        return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
 227                         __le32_to_cpu(attn->info1));
 228}
 229
 230static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
 231                                         struct hal_rx_desc *rx_desc)
 232{
 233        u8 *rx_pkt_hdr;
 234
 235        rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
 236
 237        return rx_pkt_hdr;
 238}
 239
 240static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
 241                                        struct hal_rx_desc *rx_desc)
 242{
 243        u32 tlv_tag;
 244
 245        tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
 246
 247        return tlv_tag == HAL_RX_MPDU_START;
 248}
 249
 250static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
 251                                       struct hal_rx_desc *rx_desc)
 252{
 253        return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
 254}
 255
 256static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
 257                                          struct hal_rx_desc *desc,
 258                                          u16 len)
 259{
 260        ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
 261}
 262
 263static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
 264                                        struct hal_rx_desc *desc)
 265{
 266        struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
 267
 268        return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
 269                (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
 270                 __le32_to_cpu(attn->info1)));
 271}
 272
 273static void ath11k_dp_service_mon_ring(struct timer_list *t)
 274{
 275        struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
 276        int i;
 277
 278        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
 279                ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
 280
 281        mod_timer(&ab->mon_reap_timer, jiffies +
 282                  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
 283}
 284
 285static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
 286{
 287        int i, reaped = 0;
 288        unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
 289
 290        do {
 291                for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
 292                        reaped += ath11k_dp_rx_process_mon_rings(ab, i,
 293                                                                 NULL,
 294                                                                 DP_MON_SERVICE_BUDGET);
 295
 296                /* nothing more to reap */
 297                if (reaped < DP_MON_SERVICE_BUDGET)
 298                        return 0;
 299
 300        } while (time_before(jiffies, timeout));
 301
 302        ath11k_warn(ab, "dp mon ring purge timeout");
 303
 304        return -ETIMEDOUT;
 305}
 306
/* Replenish @rx_ring with freshly allocated rx buffers.
 *
 * Allocates skbs, DMA-maps them for device writes and posts them to the
 * refill SRNG.  Each buffer is tracked in the ring's IDR; the IDR buf id
 * and @mac_id are folded into the hw cookie so the completion path can
 * look the skb up again.  @req_entries == 0 means "top up only when the
 * ring has drained below ~25% occupancy".
 *
 * Returns number of Rx buffers replenished (may be fewer than requested
 * on allocation/mapping failure or ring exhaustion).
 */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
                               struct dp_rxdma_ring *rx_ring,
                               int req_entries,
                               enum hal_rx_buf_return_buf_manager mgr)
{
        struct hal_srng *srng;
        u32 *desc;
        struct sk_buff *skb;
        int num_free;
        int num_remain;
        int buf_id;
        u32 cookie;
        dma_addr_t paddr;

        req_entries = min(req_entries, rx_ring->bufs_max);

        srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
        if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
                req_entries = num_free;

        req_entries = min(num_free, req_entries);
        num_remain = req_entries;

        while (num_remain > 0) {
                skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
                                    DP_RX_BUFFER_ALIGN_SIZE);
                if (!skb)
                        break;

                /* hw requires the buffer start address to be aligned */
                if (!IS_ALIGNED((unsigned long)skb->data,
                                DP_RX_BUFFER_ALIGN_SIZE)) {
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
                                 skb->data);
                }

                paddr = dma_map_single(ab->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);
                if (dma_mapping_error(ab->dev, paddr))
                        goto fail_free_skb;

                spin_lock_bh(&rx_ring->idr_lock);
                buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
                                   rx_ring->bufs_max * 3, GFP_ATOMIC);
                spin_unlock_bh(&rx_ring->idr_lock);
                if (buf_id < 0)
                        goto fail_dma_unmap;

                desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
                if (!desc)
                        goto fail_idr_remove;

                ATH11K_SKB_RXCB(skb)->paddr = paddr;

                /* cookie is echoed back by hw on rx completion */
                cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
                         FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

                num_remain--;

                ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
        }

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return req_entries - num_remain;

fail_idr_remove:
        spin_lock_bh(&rx_ring->idr_lock);
        idr_remove(&rx_ring->bufs_idr, buf_id);
        spin_unlock_bh(&rx_ring->idr_lock);
fail_dma_unmap:
        dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
                         DMA_FROM_DEVICE);
fail_free_skb:
        dev_kfree_skb_any(skb);

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return req_entries - num_remain;
}
 399
/* Free every buffer posted to @rx_ring: unmap and free each skb tracked
 * in the ring's IDR, then destroy the IDR.  When rxdma1 (full monitor
 * mode) is enabled, the pdev's first mon status refill ring is cleaned
 * the same way.  Always returns 0.
 */
static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
                                         struct dp_rxdma_ring *rx_ring)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct sk_buff *skb;
        int buf_id;

        spin_lock_bh(&rx_ring->idr_lock);
        idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
                idr_remove(&rx_ring->bufs_idr, buf_id);
                /* TODO: Understand where internal driver does this dma_unmap
                 * of rxdma_buffer.
                 */
                dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }

        idr_destroy(&rx_ring->bufs_idr);
        spin_unlock_bh(&rx_ring->idr_lock);

        /* if rxdma1_enable is false, mon_status_refill_ring
         * isn't setup, so don't clean.
         */
        if (!ar->ab->hw_params.rxdma1_enable)
                return 0;

        rx_ring = &dp->rx_mon_status_refill_ring[0];

        spin_lock_bh(&rx_ring->idr_lock);
        idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
                idr_remove(&rx_ring->bufs_idr, buf_id);
                /* XXX: Understand where internal driver does this dma_unmap
                 * of rxdma_buffer.
                 *
                 * NOTE(review): unmap direction differs from the loop above
                 * (DMA_BIDIRECTIONAL vs DMA_FROM_DEVICE) — confirm it matches
                 * the direction used when these buffers were mapped.
                 */
                dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
                dev_kfree_skb_any(skb);
        }

        idr_destroy(&rx_ring->bufs_idr);
        spin_unlock_bh(&rx_ring->idr_lock);

        return 0;
}
 445
 446static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
 447{
 448        struct ath11k_pdev_dp *dp = &ar->dp;
 449        struct ath11k_base *ab = ar->ab;
 450        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 451        int i;
 452
 453        ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
 454
 455        rx_ring = &dp->rxdma_mon_buf_ring;
 456        ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
 457
 458        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
 459                rx_ring = &dp->rx_mon_status_refill_ring[i];
 460                ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
 461        }
 462
 463        return 0;
 464}
 465
 466static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
 467                                          struct dp_rxdma_ring *rx_ring,
 468                                          u32 ringtype)
 469{
 470        struct ath11k_pdev_dp *dp = &ar->dp;
 471        int num_entries;
 472
 473        num_entries = rx_ring->refill_buf_ring.size /
 474                ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
 475
 476        rx_ring->bufs_max = num_entries;
 477        ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
 478                                   HAL_RX_BUF_RBM_SW3_BM);
 479        return 0;
 480}
 481
 482static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
 483{
 484        struct ath11k_pdev_dp *dp = &ar->dp;
 485        struct ath11k_base *ab = ar->ab;
 486        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 487        int i;
 488
 489        ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
 490
 491        if (ar->ab->hw_params.rxdma1_enable) {
 492                rx_ring = &dp->rxdma_mon_buf_ring;
 493                ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
 494        }
 495
 496        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
 497                rx_ring = &dp->rx_mon_status_refill_ring[i];
 498                ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
 499        }
 500
 501        return 0;
 502}
 503
 504static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
 505{
 506        struct ath11k_pdev_dp *dp = &ar->dp;
 507        struct ath11k_base *ab = ar->ab;
 508        int i;
 509
 510        ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
 511
 512        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
 513                if (ab->hw_params.rx_mac_buf_ring)
 514                        ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
 515
 516                ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
 517                ath11k_dp_srng_cleanup(ab,
 518                                       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
 519        }
 520
 521        ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
 522}
 523
 524void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
 525{
 526        struct ath11k_dp *dp = &ab->dp;
 527        int i;
 528
 529        for (i = 0; i < DP_REO_DST_RING_MAX; i++)
 530                ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
 531}
 532
 533int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
 534{
 535        struct ath11k_dp *dp = &ab->dp;
 536        int ret;
 537        int i;
 538
 539        for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
 540                ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
 541                                           HAL_REO_DST, i, 0,
 542                                           DP_REO_DST_RING_SIZE);
 543                if (ret) {
 544                        ath11k_warn(ab, "failed to setup reo_dst_ring\n");
 545                        goto err_reo_cleanup;
 546                }
 547        }
 548
 549        return 0;
 550
 551err_reo_cleanup:
 552        ath11k_dp_pdev_reo_cleanup(ab);
 553
 554        return ret;
 555}
 556
/* Allocate and set up every rx SRNG the pdev needs: the main refill
 * ring, optional per-mac rx buffer rings, per-mac rxdma error
 * destination rings, per-mac monitor status refill rings and (on
 * rxdma1-capable chips) the monitor buf/dst/desc rings.
 *
 * Returns 0 on success or a negative errno from the first failing ring
 * setup; already-created rings are NOT torn down here (the caller uses
 * ath11k_dp_rx_pdev_srng_free() for that).
 */
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct ath11k_base *ab = ar->ab;
        struct dp_srng *srng = NULL;
        int i;
        int ret;

        ret = ath11k_dp_srng_setup(ar->ab,
                                   &dp->rx_refill_buf_ring.refill_buf_ring,
                                   HAL_RXDMA_BUF, 0,
                                   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
                return ret;
        }

        /* some chips route rx buffers through per-mac rings */
        if (ar->ab->hw_params.rx_mac_buf_ring) {
                for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
                        ret = ath11k_dp_srng_setup(ar->ab,
                                                   &dp->rx_mac_buf_ring[i],
                                                   HAL_RXDMA_BUF, 1,
                                                   dp->mac_id + i, 1024);
                        if (ret) {
                                ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
                                            i);
                                return ret;
                        }
                }
        }

        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
                ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
                                           HAL_RXDMA_DST, 0, dp->mac_id + i,
                                           DP_RXDMA_ERR_DST_RING_SIZE);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
                        return ret;
                }
        }

        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
                srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
                ret = ath11k_dp_srng_setup(ar->ab,
                                           srng,
                                           HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
                                           DP_RXDMA_MON_STATUS_RING_SIZE);
                if (ret) {
                        ath11k_warn(ar->ab,
                                    "failed to setup rx_mon_status_refill_ring %d\n", i);
                        return ret;
                }
        }

        /* if rxdma1_enable is false, then it doesn't need
         * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring
         * and rxdma_mon_desc_ring.
         * init reap timer for QCA6390.
         */
        if (!ar->ab->hw_params.rxdma1_enable) {
                /* init mon status buffer reap timer */
                timer_setup(&ar->ab->mon_reap_timer,
                            ath11k_dp_service_mon_ring, 0);
                return 0;
        }

        ret = ath11k_dp_srng_setup(ar->ab,
                                   &dp->rxdma_mon_buf_ring.refill_buf_ring,
                                   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
                                   DP_RXDMA_MONITOR_BUF_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup HAL_RXDMA_MONITOR_BUF\n");
                return ret;
        }

        ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
                                   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
                                   DP_RXDMA_MONITOR_DST_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup HAL_RXDMA_MONITOR_DST\n");
                return ret;
        }

        ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
                                   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
                                   DP_RXDMA_MONITOR_DESC_RING_SIZE);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to setup HAL_RXDMA_MONITOR_DESC\n");
                return ret;
        }

        return 0;
}
 653
/* Drop every outstanding REO command and every cached flush element,
 * unmapping and freeing the qdesc memory each one tracks.  Called on
 * teardown; runs fully under reo_cmd_lock.
 */
void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct dp_reo_cmd *cmd, *tmp;
        struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;

        spin_lock_bh(&dp->reo_cmd_lock);
        list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
                list_del(&cmd->list);
                dma_unmap_single(ab->dev, cmd->data.paddr,
                                 cmd->data.size, DMA_BIDIRECTIONAL);
                kfree(cmd->data.vaddr);
                kfree(cmd);
        }

        list_for_each_entry_safe(cmd_cache, tmp_cache,
                                 &dp->reo_cmd_cache_flush_list, list) {
                list_del(&cmd_cache->list);
                /* keep the cached-element count in sync with the list */
                dp->reo_cmd_cache_flush_count--;
                dma_unmap_single(ab->dev, cmd_cache->data.paddr,
                                 cmd_cache->data.size, DMA_BIDIRECTIONAL);
                kfree(cmd_cache->data.vaddr);
                kfree(cmd_cache);
        }
        spin_unlock_bh(&dp->reo_cmd_lock);
}
 680
/* REO command completion callback: release the rx tid's hw queue
 * descriptor (unmap + free) regardless of status; a non-success status
 * is only logged, since the descriptor must be freed either way.
 */
static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
                                   enum hal_reo_cmd_status status)
{
        struct dp_rx_tid *rx_tid = ctx;

        if (status != HAL_REO_CMD_SUCCESS)
                ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
                            rx_tid->tid, status);

        dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);
}
 694
/* Flush the hw REO cache lines covering @rx_tid's queue descriptor.
 *
 * The qdesc can span several flush units: the tail portions are flushed
 * first without a status request, then the base address is flushed with
 * NEED_STATUS so ath11k_dp_reo_cmd_free() frees the descriptor once hw
 * confirms completion.  If that final command cannot be sent, the
 * descriptor is unmapped and freed here directly.
 */
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
                                      struct dp_rx_tid *rx_tid)
{
        struct ath11k_hal_reo_cmd cmd = {0};
        unsigned long tot_desc_sz, desc_sz;
        int ret;

        tot_desc_sz = rx_tid->size;
        desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

        /* flush from the end towards the base, one flush unit at a time */
        while (tot_desc_sz > desc_sz) {
                tot_desc_sz -= desc_sz;
                cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
                cmd.addr_hi = upper_32_bits(rx_tid->paddr);
                ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
                                                HAL_REO_CMD_FLUSH_CACHE, &cmd,
                                                NULL);
                if (ret)
                        ath11k_warn(ab,
                                    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
                                    rx_tid->tid, ret);
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
        ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
                                        HAL_REO_CMD_FLUSH_CACHE,
                                        &cmd, ath11k_dp_reo_cmd_free);
        if (ret) {
                ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
                           rx_tid->tid, ret);
                dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                                 DMA_BIDIRECTIONAL);
                kfree(rx_tid->vaddr);
        }
}
 733
/* Completion callback for the UPDATE_RX_QUEUE (tid delete) REO command.
 *
 * On success the tid's qdesc is queued (with a timestamp) on the cache
 * flush list; aged entries, or entries beyond the threshold, are then
 * flushed from the hw cache and freed.  On HAL_REO_CMD_DRAIN (or if the
 * list element cannot be allocated) the descriptor is freed immediately;
 * any other failure leaks the descriptor deliberately, since hw may
 * still reference it.
 */
static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
                                      enum hal_reo_cmd_status status)
{
        struct ath11k_base *ab = dp->ab;
        struct dp_rx_tid *rx_tid = ctx;
        struct dp_reo_cache_flush_elem *elem, *tmp;

        if (status == HAL_REO_CMD_DRAIN) {
                goto free_desc;
        } else if (status != HAL_REO_CMD_SUCCESS) {
                /* Shouldn't happen! Cleanup in case of other failure? */
                ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
                            rx_tid->tid, status);
                return;
        }

        elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
        if (!elem)
                goto free_desc;

        elem->ts = jiffies;
        memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

        spin_lock_bh(&dp->reo_cmd_lock);
        list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
        dp->reo_cmd_cache_flush_count++;

        /* Flush and invalidate aged REO desc from HW cache */
        list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
                                 list) {
                if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
                    time_after(jiffies, elem->ts +
                               msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
                        list_del(&elem->list);
                        dp->reo_cmd_cache_flush_count--;
                        /* drop the lock around the (sleepless but slow)
                         * cache flush; the entry is already off the list
                         */
                        spin_unlock_bh(&dp->reo_cmd_lock);

                        ath11k_dp_reo_cache_flush(ab, &elem->data);
                        kfree(elem);
                        spin_lock_bh(&dp->reo_cmd_lock);
                }
        }
        spin_unlock_bh(&dp->reo_cmd_lock);

        return;
free_desc:
        dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
                         DMA_BIDIRECTIONAL);
        kfree(rx_tid->vaddr);
}
 784
/* Invalidate a peer's rx tid hw queue: send an UPDATE_RX_QUEUE command
 * clearing the VLD bit; ath11k_dp_rx_tid_del_func() then flushes and
 * frees the qdesc on completion.  If the command cannot be sent, the
 * descriptor is unmapped and freed here.  The tid is marked inactive
 * in either case.
 */
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
                               struct ath11k_peer *peer, u8 tid)
{
        struct ath11k_hal_reo_cmd cmd = {0};
        struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
        int ret;

        if (!rx_tid->active)
                return;

        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
        cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
        ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
                                        HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
                                        ath11k_dp_rx_tid_del_func);
        if (ret) {
                ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
                           tid, ret);
                dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
                                 DMA_BIDIRECTIONAL);
                kfree(rx_tid->vaddr);
        }

        rx_tid->active = false;
}
 812
 813static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
 814                                         u32 *link_desc,
 815                                         enum hal_wbm_rel_bm_act action)
 816{
 817        struct ath11k_dp *dp = &ab->dp;
 818        struct hal_srng *srng;
 819        u32 *desc;
 820        int ret = 0;
 821
 822        srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
 823
 824        spin_lock_bh(&srng->lock);
 825
 826        ath11k_hal_srng_access_begin(ab, srng);
 827
 828        desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
 829        if (!desc) {
 830                ret = -ENOBUFS;
 831                goto exit;
 832        }
 833
 834        ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
 835                                         action);
 836
 837exit:
 838        ath11k_hal_srng_access_end(ab, srng);
 839
 840        spin_unlock_bh(&srng->lock);
 841
 842        return ret;
 843}
 844
 845static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
 846{
 847        struct ath11k_base *ab = rx_tid->ab;
 848
 849        lockdep_assert_held(&ab->base_lock);
 850
 851        if (rx_tid->dst_ring_desc) {
 852                if (rel_link_desc)
 853                        ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
 854                                                      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
 855                kfree(rx_tid->dst_ring_desc);
 856                rx_tid->dst_ring_desc = NULL;
 857        }
 858
 859        rx_tid->cur_sn = 0;
 860        rx_tid->last_frag_no = 0;
 861        rx_tid->rx_frag_bitmap = 0;
 862        __skb_queue_purge(&rx_tid->rx_frags);
 863}
 864
 865void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
 866{
 867        struct dp_rx_tid *rx_tid;
 868        int i;
 869
 870        lockdep_assert_held(&ar->ab->base_lock);
 871
 872        for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
 873                rx_tid = &peer->rx_tid[i];
 874
 875                spin_unlock_bh(&ar->ab->base_lock);
 876                del_timer_sync(&rx_tid->frag_timer);
 877                spin_lock_bh(&ar->ab->base_lock);
 878
 879                ath11k_dp_rx_frags_cleanup(rx_tid, true);
 880        }
 881}
 882
 883void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
 884{
 885        struct dp_rx_tid *rx_tid;
 886        int i;
 887
 888        lockdep_assert_held(&ar->ab->base_lock);
 889
 890        for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
 891                rx_tid = &peer->rx_tid[i];
 892
 893                ath11k_peer_rx_tid_delete(ar, peer, i);
 894                ath11k_dp_rx_frags_cleanup(rx_tid, true);
 895
 896                spin_unlock_bh(&ar->ab->base_lock);
 897                del_timer_sync(&rx_tid->frag_timer);
 898                spin_lock_bh(&ar->ab->base_lock);
 899        }
 900}
 901
 902static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
 903                                         struct ath11k_peer *peer,
 904                                         struct dp_rx_tid *rx_tid,
 905                                         u32 ba_win_sz, u16 ssn,
 906                                         bool update_ssn)
 907{
 908        struct ath11k_hal_reo_cmd cmd = {0};
 909        int ret;
 910
 911        cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 912        cmd.addr_hi = upper_32_bits(rx_tid->paddr);
 913        cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 914        cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
 915        cmd.ba_window_size = ba_win_sz;
 916
 917        if (update_ssn) {
 918                cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
 919                cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
 920        }
 921
 922        ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
 923                                        HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
 924                                        NULL);
 925        if (ret) {
 926                ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
 927                            rx_tid->tid, ret);
 928                return ret;
 929        }
 930
 931        rx_tid->ba_win_sz = ba_win_sz;
 932
 933        return 0;
 934}
 935
 936static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
 937                                      const u8 *peer_mac, int vdev_id, u8 tid)
 938{
 939        struct ath11k_peer *peer;
 940        struct dp_rx_tid *rx_tid;
 941
 942        spin_lock_bh(&ab->base_lock);
 943
 944        peer = ath11k_peer_find(ab, vdev_id, peer_mac);
 945        if (!peer) {
 946                ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
 947                goto unlock_exit;
 948        }
 949
 950        rx_tid = &peer->rx_tid[tid];
 951        if (!rx_tid->active)
 952                goto unlock_exit;
 953
 954        dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
 955                         DMA_BIDIRECTIONAL);
 956        kfree(rx_tid->vaddr);
 957
 958        rx_tid->active = false;
 959
 960unlock_exit:
 961        spin_unlock_bh(&ab->base_lock);
 962}
 963
/* Set up (or update) the rx reorder queue for @tid of the peer identified
 * by @peer_mac/@vdev_id.
 *
 * If the tid is already active, only the BA window size and SSN are
 * updated via a REO command and firmware is re-pointed at the existing
 * queue descriptor through WMI.  Otherwise a new hardware queue
 * descriptor is allocated, DMA-mapped, initialized and registered with
 * firmware.
 *
 * Returns 0 on success or a negative errno.
 */
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
			     u8 tid, u32 ba_win_sz, u16 ssn,
			     enum hal_pn_type pn_type)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u32 hw_desc_sz;
	u32 *addr_aligned;
	void *vaddr;
	dma_addr_t paddr;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
				    tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	/* GFP_ATOMIC: allocating under base_lock (spinlock, BHs disabled).
	 * Over-allocate so the descriptor can be aligned below.
	 */
	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	if (!vaddr) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
				   ssn, pn_type);

	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
			       DMA_BIDIRECTIONAL);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		goto err_mem_free;
	}

	rx_tid->vaddr = vaddr;
	rx_tid->paddr = paddr;
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	spin_unlock_bh(&ab->base_lock);

	/* Point firmware at the new queue descriptor; on failure roll back
	 * the allocation done above.
	 */
	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
			    tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;

err_mem_free:
	kfree(vaddr);

	return ret;
}
1061
1062int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
1063                             struct ieee80211_ampdu_params *params)
1064{
1065        struct ath11k_base *ab = ar->ab;
1066        struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1067        int vdev_id = arsta->arvif->vdev_id;
1068        int ret;
1069
1070        ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
1071                                       params->tid, params->buf_size,
1072                                       params->ssn, arsta->pn_type);
1073        if (ret)
1074                ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
1075
1076        return ret;
1077}
1078
1079int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
1080                            struct ieee80211_ampdu_params *params)
1081{
1082        struct ath11k_base *ab = ar->ab;
1083        struct ath11k_peer *peer;
1084        struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1085        int vdev_id = arsta->arvif->vdev_id;
1086        dma_addr_t paddr;
1087        bool active;
1088        int ret;
1089
1090        spin_lock_bh(&ab->base_lock);
1091
1092        peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
1093        if (!peer) {
1094                ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1095                spin_unlock_bh(&ab->base_lock);
1096                return -ENOENT;
1097        }
1098
1099        paddr = peer->rx_tid[params->tid].paddr;
1100        active = peer->rx_tid[params->tid].active;
1101
1102        if (!active) {
1103                spin_unlock_bh(&ab->base_lock);
1104                return 0;
1105        }
1106
1107        ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
1108        spin_unlock_bh(&ab->base_lock);
1109        if (ret) {
1110                ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1111                            params->tid, ret);
1112                return ret;
1113        }
1114
1115        ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1116                                                     params->sta->addr, paddr,
1117                                                     params->tid, 1, 1);
1118        if (ret)
1119                ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
1120                            ret);
1121
1122        return ret;
1123}
1124
/* Enable or disable hardware PN/TSC replay-check offload for all active
 * rx tids of the peer @peer_addr on @arvif, in response to a key
 * add/remove (@key_cmd is SET_KEY or DISABLE_KEY).
 *
 * Offload is configured only for pairwise keys; group traffic keeps using
 * mac80211's software replay detection (see NOTE below).
 *
 * Returns 0 on success or a negative errno.
 */
int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath11k *ar = arvif->ar;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_hal_reo_cmd cmd = {0};
	struct ath11k_peer *peer;
	struct dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	/* Mark all PN-related fields of the rx queue as to-be-updated; for
	 * DISABLE_KEY the fields are left at their zeroed defaults, which
	 * turns the PN check off.
	 */
	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
		    HAL_REO_CMD_UPD0_PN_SIZE |
		    HAL_REO_CMD_UPD0_PN_VALID |
		    HAL_REO_CMD_UPD0_PN_CHECK |
		    HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			/* 48-bit packet number as used by these ciphers */
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	/* Apply the same REO queue update to every active tid */
	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
						HAL_REO_CMD_UPDATE_RX_QUEUE,
						&cmd, NULL);
		if (ret) {
			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
				    tid, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}
1196
1197static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1198                                             u16 peer_id)
1199{
1200        int i;
1201
1202        for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1203                if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1204                        if (peer_id == ppdu_stats->user_stats[i].peer_id)
1205                                return i;
1206                } else {
1207                        return i;
1208                }
1209        }
1210
1211        return -EINVAL;
1212}
1213
/* TLV iterator callback (see ath11k_dp_htt_tlv_iter()) that copies
 * recognized PPDU stats TLVs into the htt_ppdu_stats_info passed via
 * @data.  Unknown tags are silently skipped; a too-short TLV or an
 * exhausted user_stats table yields -EINVAL.
 */
static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = (struct htt_ppdu_stats_info *)data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		/* per-PPDU common stats; not keyed by peer */
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		/* per-user tx rate info, routed to the peer's slot */
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		/* per-user completion stats (retries, ampdu flag, ...) */
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		/* per-user ack/block-ack status (success bytes/msdus, tid) */
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		peer_id =
		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy((void *)&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}
1297
/* Walk a buffer of HTT TLVs, invoking @iter for each tag/payload pair.
 *
 * Returns -EINVAL on a malformed buffer (truncated header or payload).
 * Note that only -ENOMEM from @iter aborts the walk; other iterator
 * errors are ignored and iteration continues with the next TLV.
 * Returns 0 after the whole buffer has been consumed.
 */
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		/* need at least a full TLV header */
		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* payload must fit in the remaining buffer */
		if (tlv_len > len) {
			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}
1334
1335static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
1336{
1337        u32 ret = 0;
1338
1339        switch (sgi) {
1340        case RX_MSDU_START_SGI_0_8_US:
1341                ret = NL80211_RATE_INFO_HE_GI_0_8;
1342                break;
1343        case RX_MSDU_START_SGI_1_6_US:
1344                ret = NL80211_RATE_INFO_HE_GI_1_6;
1345                break;
1346        case RX_MSDU_START_SGI_3_2_US:
1347                ret = NL80211_RATE_INFO_HE_GI_3_2;
1348                break;
1349        }
1350
1351        return ret;
1352}
1353
/* Translate one user's slice of a completed PPDU's stats into the
 * station's rate_info (arsta->txrate / last_txrate) and, for data tids,
 * the per-peer tx statistics used by debugfs.
 *
 * Requires a USR_RATE TLV for the user; COMPLTN_COMMON and ACK_BA TLVs
 * are used opportunistically when present.
 */
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath11k_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 succ_bytes = 0;
	u16 rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!usr_stats)
		return;

	/* rate info is mandatory for any update */
	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = usr_stats->ack_ba.success_bytes;
		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
				      usr_stats->ack_ba.info);
		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
				usr_stats->ack_ba.info);
	}

	if (common->fes_duration_us)
		tx_duration = common->fes_duration_us;

	/* unpack the HTT rate word */
	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	/* HTT bw field is offset by 2 from the mac80211 bw enum —
	 * presumably its encoding starts at 20 MHz == 2; TODO confirm
	 * against the HTT interface definition.
	 */
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: If host configured fixed rates and in some other special
	 * cases, the broadcast/management frames are sent in different rates.
	 * Firmware rate's control to be skipped for this?
	 */

	/* sanity-check the rate fields before using them below */
	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
		ath11k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
		ath11k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	arsta = (struct ath11k_sta *)sta->drv_priv;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	/* fill arsta->txrate according to the preamble type */
	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		/* HT MCS encodes nss: 8 MCS values per spatial stream */
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
		arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
						(user_rate->ru_end -
						 user_rate->ru_start) + 1);
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);

		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}
1504
1505static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1506                                         struct htt_ppdu_stats *ppdu_stats)
1507{
1508        u8 user;
1509
1510        for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1511                ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1512}
1513
/* Look up the cached htt_ppdu_stats_info for @ppdu_id, allocating a new
 * one when none exists.
 *
 * When the cache has grown past HTT_PPDU_DESC_MAX_DEPTH, the oldest entry
 * is flushed into the per-peer tx stats and freed before allocating.
 * Returns NULL on allocation failure.  The returned entry remains on
 * ar->ppdu_stats_info and is owned by that list.
 */
static
struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	spin_lock_bh(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		/* fast path: an entry for this ppdu_id already exists */
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id) {
				spin_unlock_bh(&ar->data_lock);
				return ppdu_info;
			}
		}

		/* cache full: evict the oldest entry, flushing its stats */
		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}
	spin_unlock_bh(&ar->data_lock);

	/* GFP_ATOMIC: may be called from the HTT rx completion path */
	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	spin_lock_bh(&ar->data_lock);
	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;
	spin_unlock_bh(&ar->data_lock);

	return ppdu_info;
}
1551
/* Handle an HTT PPDU-stats indication from firmware: locate the pdev's
 * ath11k instance, fetch (or allocate) the cached stats entry for this
 * ppdu_id, and parse the message's TLV payload into it.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
				      struct sk_buff *skb)
{
	struct ath11k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath11k *ar;
	int ret;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
	ppdu_id = msg->ppdu_id;

	/* rcu protects the pdev -> ar lookup */
	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);

	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath11k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

exit:
	rcu_read_unlock();

	return ret;
}
1597
1598static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1599{
1600        struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1601        struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1602        struct ath11k *ar;
1603        u8 pdev_id;
1604
1605        pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1606        ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1607        if (!ar) {
1608                ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1609                return;
1610        }
1611
1612        trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
1613                                ar->ab->pktlog_defs_checksum);
1614}
1615
1616static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1617                                                  struct sk_buff *skb)
1618{
1619        u32 *data = (u32 *)skb->data;
1620        u8 pdev_id, ring_type, ring_id, pdev_idx;
1621        u16 hp, tp;
1622        u32 backpressure_time;
1623        struct ath11k_bp_stats *bp_stats;
1624
1625        pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1626        ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1627        ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1628        ++data;
1629
1630        hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1631        tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1632        ++data;
1633
1634        backpressure_time = *data;
1635
1636        ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
1637                   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1638
1639        if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
1640                if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
1641                        return;
1642
1643                bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
1644        } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
1645                pdev_idx = DP_HW2SW_MACID(pdev_id);
1646
1647                if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
1648                        return;
1649
1650                bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
1651        } else {
1652                ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
1653                            ring_type);
1654                return;
1655        }
1656
1657        spin_lock_bh(&ab->base_lock);
1658        bp_stats->hp = hp;
1659        bp_stats->tp = tp;
1660        bp_stats->count++;
1661        bp_stats->jiffies = jiffies;
1662        spin_unlock_bh(&ab->base_lock);
1663}
1664
1665void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1666                                       struct sk_buff *skb)
1667{
1668        struct ath11k_dp *dp = &ab->dp;
1669        struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1670        enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1671        u16 peer_id;
1672        u8 vdev_id;
1673        u8 mac_addr[ETH_ALEN];
1674        u16 peer_mac_h16;
1675        u16 ast_hash;
1676        u16 hw_peer_id;
1677
1678        ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
1679
1680        switch (type) {
1681        case HTT_T2H_MSG_TYPE_VERSION_CONF:
1682                dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1683                                                  resp->version_msg.version);
1684                dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1685                                                  resp->version_msg.version);
1686                complete(&dp->htt_tgt_version_received);
1687                break;
1688        case HTT_T2H_MSG_TYPE_PEER_MAP:
1689                vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1690                                    resp->peer_map_ev.info);
1691                peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1692                                    resp->peer_map_ev.info);
1693                peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1694                                         resp->peer_map_ev.info1);
1695                ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1696                                       peer_mac_h16, mac_addr);
1697                ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1698                break;
1699        case HTT_T2H_MSG_TYPE_PEER_MAP2:
1700                vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1701                                    resp->peer_map_ev.info);
1702                peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1703                                    resp->peer_map_ev.info);
1704                peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1705                                         resp->peer_map_ev.info1);
1706                ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1707                                       peer_mac_h16, mac_addr);
1708                ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1709                                     resp->peer_map_ev.info2);
1710                hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
1711                                       resp->peer_map_ev.info1);
1712                ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1713                                      hw_peer_id);
1714                break;
1715        case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1716        case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1717                peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1718                                    resp->peer_unmap_ev.info);
1719                ath11k_peer_unmap_event(ab, peer_id);
1720                break;
1721        case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1722                ath11k_htt_pull_ppdu_stats(ab, skb);
1723                break;
1724        case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1725                ath11k_debugfs_htt_ext_stats_handler(ab, skb);
1726                break;
1727        case HTT_T2H_MSG_TYPE_PKTLOG:
1728                ath11k_htt_pktlog(ab, skb);
1729                break;
1730        case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1731                ath11k_htt_backpressure_event_handler(ab, skb);
1732                break;
1733        default:
1734                ath11k_warn(ab, "htt event %d not handled\n", type);
1735                break;
1736        }
1737
1738        dev_kfree_skb_any(skb);
1739}
1740
/* ath11k_dp_rx_msdu_coalesce() - stitch an MSDU spread over multiple rx
 * buffers into @first.
 *
 * @first: buffer holding the start of the MSDU (rx descriptor still
 *	   prefixed), @last: buffer holding its final part, @msdu_list:
 *	   queue the continuation buffers are dequeued from,
 * @l3pad_bytes: L3 padding after the descriptor in the first buffer,
 * @msdu_len: total MSDU payload length.
 *
 * On success @first contains the full payload with its rx descriptor
 * stripped. Returns 0 on success or a negative errno.
 */
static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
				      struct sk_buff_head *msdu_list,
				      struct sk_buff *first, struct sk_buff *last,
				      u8 l3pad_bytes, int msdu_len)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
	int buf_first_hdr_len, buf_first_len;
	struct hal_rx_desc *ldesc;
	int space_extra, rem_len, buf_len;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* As the msdu is spread across multiple rx buffers,
	 * find the offset to the start of msdu for computing
	 * the length of the msdu in the first buffer.
	 */
	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;

	/* Unexpected: a single-buffer MSDU should not reach this helper;
	 * handle it gracefully anyway.
	 */
	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
		skb_put(first, buf_first_hdr_len + msdu_len);
		skb_pull(first, buf_first_hdr_len);
		return 0;
	}

	ldesc = (struct hal_rx_desc *)last->data;
	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);

	/* MSDU spans over multiple buffers because the length of the MSDU
	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
	 */
	skb_put(first, DP_RX_BUFFER_SIZE);
	skb_pull(first, buf_first_hdr_len);

	/* When an MSDU spread over multiple buffers attention, MSDU_END and
	 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
	 */
	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);

	/* Grow @first's tailroom if the remaining payload won't fit. */
	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
	if (space_extra > 0 &&
	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
		/* Free up all buffers of the MSDU */
		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
			rxcb = ATH11K_SKB_RXCB(skb);
			if (!rxcb->is_continuation) {
				dev_kfree_skb_any(skb);
				break;
			}
			dev_kfree_skb_any(skb);
		}
		return -ENOMEM;
	}

	/* Append each continuation buffer's payload (descriptor skipped)
	 * to @first, consuming the source buffer. A buffer without the
	 * continuation flag terminates the MSDU.
	 */
	rem_len = msdu_len - buf_first_len;
	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
		rxcb = ATH11K_SKB_RXCB(skb);
		if (rxcb->is_continuation)
			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
		else
			buf_len = rem_len;

		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
			WARN_ON_ONCE(1);
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		skb_put(skb, buf_len + hal_rx_desc_sz);
		skb_pull(skb, hal_rx_desc_sz);
		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
					  buf_len);
		dev_kfree_skb_any(skb);

		rem_len -= buf_len;
		if (!rxcb->is_continuation)
			break;
	}

	return 0;
}
1825
1826static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1827                                                      struct sk_buff *first)
1828{
1829        struct sk_buff *skb;
1830        struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1831
1832        if (!rxcb->is_continuation)
1833                return first;
1834
1835        skb_queue_walk(msdu_list, skb) {
1836                rxcb = ATH11K_SKB_RXCB(skb);
1837                if (!rxcb->is_continuation)
1838                        return skb;
1839        }
1840
1841        return NULL;
1842}
1843
1844static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1845{
1846        struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1847        struct rx_attention *rx_attention;
1848        bool ip_csum_fail, l4_csum_fail;
1849
1850        rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1851        ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1852        l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1853
1854        msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1855                          CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1856}
1857
1858static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1859                                       enum hal_encrypt_type enctype)
1860{
1861        switch (enctype) {
1862        case HAL_ENCRYPT_TYPE_OPEN:
1863        case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1864        case HAL_ENCRYPT_TYPE_TKIP_MIC:
1865                return 0;
1866        case HAL_ENCRYPT_TYPE_CCMP_128:
1867                return IEEE80211_CCMP_MIC_LEN;
1868        case HAL_ENCRYPT_TYPE_CCMP_256:
1869                return IEEE80211_CCMP_256_MIC_LEN;
1870        case HAL_ENCRYPT_TYPE_GCMP_128:
1871        case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1872                return IEEE80211_GCMP_MIC_LEN;
1873        case HAL_ENCRYPT_TYPE_WEP_40:
1874        case HAL_ENCRYPT_TYPE_WEP_104:
1875        case HAL_ENCRYPT_TYPE_WEP_128:
1876        case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1877        case HAL_ENCRYPT_TYPE_WAPI:
1878                break;
1879        }
1880
1881        ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1882        return 0;
1883}
1884
1885static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1886                                         enum hal_encrypt_type enctype)
1887{
1888        switch (enctype) {
1889        case HAL_ENCRYPT_TYPE_OPEN:
1890                return 0;
1891        case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1892        case HAL_ENCRYPT_TYPE_TKIP_MIC:
1893                return IEEE80211_TKIP_IV_LEN;
1894        case HAL_ENCRYPT_TYPE_CCMP_128:
1895                return IEEE80211_CCMP_HDR_LEN;
1896        case HAL_ENCRYPT_TYPE_CCMP_256:
1897                return IEEE80211_CCMP_256_HDR_LEN;
1898        case HAL_ENCRYPT_TYPE_GCMP_128:
1899        case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1900                return IEEE80211_GCMP_HDR_LEN;
1901        case HAL_ENCRYPT_TYPE_WEP_40:
1902        case HAL_ENCRYPT_TYPE_WEP_104:
1903        case HAL_ENCRYPT_TYPE_WEP_128:
1904        case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1905        case HAL_ENCRYPT_TYPE_WAPI:
1906                break;
1907        }
1908
1909        ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1910        return 0;
1911}
1912
1913static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1914                                       enum hal_encrypt_type enctype)
1915{
1916        switch (enctype) {
1917        case HAL_ENCRYPT_TYPE_OPEN:
1918        case HAL_ENCRYPT_TYPE_CCMP_128:
1919        case HAL_ENCRYPT_TYPE_CCMP_256:
1920        case HAL_ENCRYPT_TYPE_GCMP_128:
1921        case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1922                return 0;
1923        case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1924        case HAL_ENCRYPT_TYPE_TKIP_MIC:
1925                return IEEE80211_TKIP_ICV_LEN;
1926        case HAL_ENCRYPT_TYPE_WEP_40:
1927        case HAL_ENCRYPT_TYPE_WEP_104:
1928        case HAL_ENCRYPT_TYPE_WEP_128:
1929        case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1930        case HAL_ENCRYPT_TYPE_WAPI:
1931                break;
1932        }
1933
1934        ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1935        return 0;
1936}
1937
/* ath11k_dp_rx_h_undecap_nwifi() - convert a native-wifi decapped MSDU
 * back into a regular 802.11 QoS data frame.
 *
 * @first_hdr points at the original 802.11 header saved in the rx
 * descriptor. For the first A-MSDU subframe that header is reused with
 * the A-MSDU-present bit cleared; middle/last subframes get a QoS
 * header rebuilt from the rxcb TID. If hardware left the IV in place
 * (no RX_FLAG_IV_STRIPPED) it is re-inserted between header and payload.
 */
static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
					 struct sk_buff *msdu,
					 u8 *first_hdr,
					 enum hal_encrypt_type enctype,
					 struct ieee80211_rx_status *status)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u16 qos_ctl = 0;
	u8 *qos;

	/* copy SA & DA and pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));

	if (rxcb->is_first_msdu) {
		/* original 802.11 header is valid for the first msdu
		 * hence we can reuse the same header
		 */
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);

		/* Each A-MSDU subframe will be reported as a separate MSDU,
		 * so strip the A-MSDU bit from QoS Ctl.
		 */
		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		/*  Rebuild qos header if this is a middle/last msdu */
		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

		/* Reset the order bit as the HT_Control header is stripped */
		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

		qos_ctl = rxcb->tid;

		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

		/* TODO Add other QoS ctl fields when required */

		/* copy decap header before overwriting for reuse below */
		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
	}

	/* Re-insert the IV that followed the original header so the frame
	 * layout matches what mac80211 expects for unstripped frames.
	 */
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	/* Middle/last subframe: prepend the rebuilt QoS ctl and the saved
	 * decap header, then we are done.
	 */
	if (!rxcb->is_first_msdu) {
		memcpy(skb_push(msdu,
				IEEE80211_QOS_CTL_LEN), &qos_ctl,
				IEEE80211_QOS_CTL_LEN);
		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
		return;
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
2016
/* ath11k_dp_rx_h_undecap_raw() - undecap a raw (full 802.11) MSDU.
 *
 * Only valid for MPDUs consisting of a single MSDU (first and last at
 * once). Strips the FCS and, for decrypted frames, the crypto trailer
 * (MIC/ICV) depending on which parts hardware already stripped; when
 * the IV is to be removed the 802.11 header is shifted up over it.
 */
static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	/* Raw undecap is only expected for non-A-MSDU frames. */
	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC: trim the 8-byte Michael MIC (same length as the CCMP MIC
	 * constant used here) from the last fragment of a TKIP frame.
	 */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
2075
2076static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
2077                                         struct sk_buff *msdu,
2078                                         enum hal_encrypt_type enctype)
2079{
2080        struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2081        struct ieee80211_hdr *hdr;
2082        size_t hdr_len, crypto_len;
2083        void *rfc1042;
2084        bool is_amsdu;
2085
2086        is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
2087        hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
2088        rfc1042 = hdr;
2089
2090        if (rxcb->is_first_msdu) {
2091                hdr_len = ieee80211_hdrlen(hdr->frame_control);
2092                crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2093
2094                rfc1042 += hdr_len + crypto_len;
2095        }
2096
2097        if (is_amsdu)
2098                rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
2099
2100        return rfc1042;
2101}
2102
/* ath11k_dp_rx_h_undecap_eth() - convert an Ethernet-II decapped MSDU
 * back into an 802.11 frame.
 *
 * Replaces the Ethernet header with RFC1042/LLC/SNAP plus the original
 * 802.11 header from @first_hdr, re-inserting the IV when hardware left
 * it in place, and restores the frame's real DA/SA.
 */
static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
				       struct sk_buff *msdu,
				       u8 *first_hdr,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	void *rfc1042;

	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
	       sizeof(struct ath11k_dp_rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/* Re-insert the IV that followed the original header when it was
	 * not stripped by hardware.
	 */
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath11k_dp_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + hdr_len,
		       ath11k_dp_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
2150
2151static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2152                                   struct hal_rx_desc *rx_desc,
2153                                   enum hal_encrypt_type enctype,
2154                                   struct ieee80211_rx_status *status,
2155                                   bool decrypted)
2156{
2157        u8 *first_hdr;
2158        u8 decap;
2159
2160        first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2161        decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2162
2163        switch (decap) {
2164        case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2165                ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2166                                             enctype, status);
2167                break;
2168        case DP_RX_DECAP_TYPE_RAW:
2169                ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2170                                           decrypted);
2171                break;
2172        case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2173                /* TODO undecap support for middle/last msdu's of amsdu */
2174                ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2175                                           enctype, status);
2176                break;
2177        case DP_RX_DECAP_TYPE_8023:
2178                /* TODO: Handle undecap for these formats */
2179                break;
2180        }
2181}
2182
/* ath11k_dp_rx_h_mpdu() - per-MPDU rx processing: determine the cipher,
 * set error/decryption flags, apply checksum offload and undecap @msdu.
 */
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
				struct sk_buff *msdu,
				struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	bool  fill_crypto_hdr, mcast;
	enum hal_encrypt_type enctype;
	bool is_decrypted = false;
	struct ieee80211_hdr *hdr;
	struct ath11k_peer *peer;
	struct rx_attention *rx_attention;
	u32 err_bitmap;

	hdr = (struct ieee80211_hdr *)msdu->data;

	/* PN for multicast packets will be checked in mac80211 */

	mcast = is_multicast_ether_addr(hdr->addr1);
	fill_crypto_hdr = mcast;

	/* Look up the transmitter (addr2) to pick the pairwise or group
	 * cipher; an unknown peer is treated as unencrypted.
	 */
	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
	if (peer) {
		if (mcast)
			enctype = peer->sec_type_grp;
		else
			enctype = peer->sec_type;
	} else {
		enctype = HAL_ENCRYPT_TYPE_OPEN;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	/* Trust the hardware "decrypted" bit only for error-free,
	 * encrypted frames.
	 */
	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
		is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			     RX_FLAG_MMIC_ERROR |
			     RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted) {
		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;

		/* For multicast frames the crypto header is kept in place
		 * so mac80211 can do the PN check itself.
		 */
		if (fill_crypto_hdr)
			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			rx_status->flag |= RX_FLAG_IV_STRIPPED |
					   RX_FLAG_PN_VALIDATED;
	}

	ath11k_dp_rx_h_csum_offload(ar, msdu);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       enctype, rx_status, is_decrypted);

	if (!is_decrypted || fill_crypto_hdr)
		return;

	/* Fully decrypted with IV stripped: clear the protected bit so
	 * mac80211 does not try to decrypt again.
	 */
	hdr = (void *)msdu->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
2253
2254static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2255                                struct ieee80211_rx_status *rx_status)
2256{
2257        struct ieee80211_supported_band *sband;
2258        enum rx_msdu_start_pkt_type pkt_type;
2259        u8 bw;
2260        u8 rate_mcs, nss;
2261        u8 sgi;
2262        bool is_cck;
2263
2264        pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2265        bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2266        rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2267        nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2268        sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2269
2270        switch (pkt_type) {
2271        case RX_MSDU_START_PKT_TYPE_11A:
2272        case RX_MSDU_START_PKT_TYPE_11B:
2273                is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2274                sband = &ar->mac.sbands[rx_status->band];
2275                rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2276                                                                is_cck);
2277                break;
2278        case RX_MSDU_START_PKT_TYPE_11N:
2279                rx_status->encoding = RX_ENC_HT;
2280                if (rate_mcs > ATH11K_HT_MCS_MAX) {
2281                        ath11k_warn(ar->ab,
2282                                    "Received with invalid mcs in HT mode %d\n",
2283                                     rate_mcs);
2284                        break;
2285                }
2286                rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2287                if (sgi)
2288                        rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2289                rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2290                break;
2291        case RX_MSDU_START_PKT_TYPE_11AC:
2292                rx_status->encoding = RX_ENC_VHT;
2293                rx_status->rate_idx = rate_mcs;
2294                if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2295                        ath11k_warn(ar->ab,
2296                                    "Received with invalid mcs in VHT mode %d\n",
2297                                     rate_mcs);
2298                        break;
2299                }
2300                rx_status->nss = nss;
2301                if (sgi)
2302                        rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2303                rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2304                break;
2305        case RX_MSDU_START_PKT_TYPE_11AX:
2306                rx_status->rate_idx = rate_mcs;
2307                if (rate_mcs > ATH11K_HE_MCS_MAX) {
2308                        ath11k_warn(ar->ab,
2309                                    "Received with invalid mcs in HE mode %d\n",
2310                                    rate_mcs);
2311                        break;
2312                }
2313                rx_status->encoding = RX_ENC_HE;
2314                rx_status->nss = nss;
2315                rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
2316                rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2317                break;
2318        }
2319}
2320
/* ath11k_dp_rx_h_ppdu() - fill the per-PPDU fields (band, frequency,
 * rate) of @rx_status from the rx descriptor.
 */
static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
				struct ieee80211_rx_status *rx_status)
{
	u8 channel_num;
	u32 center_freq, meta_data;
	struct ieee80211_channel *channel;

	/* Start from legacy defaults; ath11k_dp_rx_h_rate() refines them. */
	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	/* Low bits carry the channel number, the upper 16 bits the center
	 * frequency in MHz.
	 */
	meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
	channel_num = meta_data;
	center_freq = meta_data >> 16;

	if (center_freq >= 5935 && center_freq <= 7105) {
		rx_status->band = NL80211_BAND_6GHZ;
	} else if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 173) {
		rx_status->band = NL80211_BAND_5GHZ;
	} else {
		/* Channel could not be derived from the descriptor; fall
		 * back to the radio's current rx channel and dump the
		 * descriptor for debugging.
		 */
		spin_lock_bh(&ar->data_lock);
		channel = ar->rx_channel;
		if (channel) {
			rx_status->band = channel->band;
			channel_num =
				ieee80211_frequency_to_channel(channel->center_freq);
		}
		spin_unlock_bh(&ar->data_lock);
		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
				rx_desc, sizeof(struct hal_rx_desc));
	}

	rx_status->freq = ieee80211_channel_to_frequency(channel_num,
							 rx_status->band);

	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
2364
2365static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
2366                                  size_t size)
2367{
2368        u8 *qc;
2369        int tid;
2370
2371        if (!ieee80211_is_data_qos(hdr->frame_control))
2372                return "";
2373
2374        qc = ieee80211_get_qos_ctl(hdr);
2375        tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
2376        snprintf(out, size, "tid %d", tid);
2377
2378        return out;
2379}
2380
/* Hand a fully processed rx MSDU to mac80211 via NAPI.
 *
 * For HE frames a radiotap HE header template is pushed in front of the
 * frame and RX_FLAG_RADIOTAP_HE is set so mac80211 can report HE rate
 * info.  The skb is passed to ieee80211_rx_napi() and must not be
 * touched afterwards.
 */
static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
				      struct sk_buff *msdu)
{
	/* Template only advertises which HE fields are known; values are
	 * derived from rx_status by mac80211.
	 */
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
	};
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_radiotap_he *he = NULL;
	char tid[32];

	status = IEEE80211_SKB_RXCB(msdu);
	if (status->encoding == RX_ENC_HE) {
		he = skb_push(msdu, sizeof(known));
		memcpy(he, &known, sizeof(known));
		status->flag |= RX_FLAG_RADIOTAP_HE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   ieee80211_get_SA(hdr),
		   ath11k_print_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
			msdu->data, msdu->len);

	/* TODO: trace rx packet */

	ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
}
2433
/* Validate and prepare a reaped rx MSDU for delivery.
 *
 * Strips the hal rx descriptor (plus l3 padding) from the skb,
 * coalesces MSDUs that span multiple rx buffers, and fills in the
 * ieee80211_rx_status stored in the skb cb.
 *
 * Returns 0 on success; on any error the caller is responsible for
 * freeing @msdu.
 */
static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
				     struct sk_buff *msdu,
				     struct sk_buff_head *msdu_list)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_rx_desc *rx_desc, *lrx_desc;
	struct rx_attention *rx_attention;
	struct ieee80211_rx_status rx_status = {0};
	struct ieee80211_rx_status *status;
	struct ath11k_skb_rxcb *rxcb;
	struct ieee80211_hdr *hdr;
	struct sk_buff *last_buf;
	u8 l3_pad_bytes;
	u8 *hdr_status;
	u16 msdu_len;
	int ret;
	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

	/* The attention/MSDU_END/MPDU_END tlvs are valid only in the last
	 * buffer of a (possibly multi-buffer) MSDU.
	 */
	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
	if (!last_buf) {
		ath11k_warn(ab,
			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
		ret = -EIO;
		goto free_out;
	}

	rx_desc = (struct hal_rx_desc *)msdu->data;
	lrx_desc = (struct hal_rx_desc *)last_buf->data;
	rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
		ath11k_warn(ab, "msdu_done bit in attention is not set\n");
		ret = -EIO;
		goto free_out;
	}

	rxcb = ATH11K_SKB_RXCB(msdu);
	rxcb->rx_desc = rx_desc;
	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);

	if (rxcb->is_frag) {
		/* Fragments: only the descriptor is stripped here. */
		skb_pull(msdu, hal_rx_desc_sz);
	} else if (!rxcb->is_continuation) {
		/* Single-buffer MSDU: sanity-check the hw-reported length
		 * against the rx buffer size before trusting it.
		 */
		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
			ret = -EINVAL;
			ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
					sizeof(struct ieee80211_hdr));
			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
					sizeof(struct hal_rx_desc));
			goto free_out;
		}
		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
	} else {
		/* MSDU spans multiple rx buffers; merge them into one skb. */
		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
						 msdu, last_buf,
						 l3_pad_bytes, msdu_len);
		if (ret) {
			ath11k_warn(ab,
				    "failed to coalesce msdu rx buffer%d\n", ret);
			goto free_out;
		}
	}

	hdr = (struct ieee80211_hdr *)msdu->data;

	/* Process only data frames */
	if (!ieee80211_is_data(hdr->frame_control))
		return -EINVAL;

	ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);

	/* REO has already de-duplicated; tell mac80211 to skip its checks. */
	rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;

	status = IEEE80211_SKB_RXCB(msdu);
	*status = rx_status;
	return 0;

free_out:
	return ret;
}
2518
2519static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2520                                                  struct napi_struct *napi,
2521                                                  struct sk_buff_head *msdu_list,
2522                                                  int *quota, int ring_id)
2523{
2524        struct ath11k_skb_rxcb *rxcb;
2525        struct sk_buff *msdu;
2526        struct ath11k *ar;
2527        u8 mac_id;
2528        int ret;
2529
2530        if (skb_queue_empty(msdu_list))
2531                return;
2532
2533        rcu_read_lock();
2534
2535        while (*quota && (msdu = __skb_dequeue(msdu_list))) {
2536                rxcb = ATH11K_SKB_RXCB(msdu);
2537                mac_id = rxcb->mac_id;
2538                ar = ab->pdevs[mac_id].ar;
2539                if (!rcu_dereference(ab->pdevs_active[mac_id])) {
2540                        dev_kfree_skb_any(msdu);
2541                        continue;
2542                }
2543
2544                if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
2545                        dev_kfree_skb_any(msdu);
2546                        continue;
2547                }
2548
2549                ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
2550                if (ret) {
2551                        ath11k_dbg(ab, ATH11K_DBG_DATA,
2552                                   "Unable to process msdu %d", ret);
2553                        dev_kfree_skb_any(msdu);
2554                        continue;
2555                }
2556
2557                ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
2558                (*quota)--;
2559        }
2560
2561        rcu_read_unlock();
2562}
2563
/* NAPI poll handler for a REO destination ring.
 *
 * Reaps up to @budget rx descriptors, unmaps and queues the backing
 * skbs, replenishes the rxdma refill rings per radio, then processes
 * and delivers the queued MSDUs.  Returns the number of frames
 * consumed (budget - remaining quota).
 */
int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
			 struct napi_struct *napi, int budget)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	int num_buffs_reaped[MAX_RADIOS] = {0};
	struct sk_buff_head msdu_list;
	struct ath11k_skb_rxcb *rxcb;
	int total_msdu_reaped = 0;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	int quota = budget;
	bool done = false;
	int buf_id, mac_id;
	struct ath11k *ar;
	u32 *rx_desc;
	int i;

	__skb_queue_head_init(&msdu_list);

	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

try_again:
	while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
		/* Copy the descriptor out of the ring before using it. */
		struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
		enum hal_reo_dest_ring_push_reason push_reason;
		u32 cookie;

		/* The sw cookie encodes the owning pdev and the idr buf id. */
		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				   desc.buf_addr_info.info1);
		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
				   cookie);
		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);

		ar = ab->pdevs[mac_id].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;
		spin_lock_bh(&rx_ring->idr_lock);
		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
		if (!msdu) {
			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
				    buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);
			continue;
		}

		idr_remove(&rx_ring->bufs_idr, buf_id);
		spin_unlock_bh(&rx_ring->idr_lock);

		rxcb = ATH11K_SKB_RXCB(msdu);
		dma_unmap_single(ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[mac_id]++;
		total_msdu_reaped++;

		/* Drop frames REO pushed for reasons other than normal
		 * routing (e.g. error handling).
		 */
		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
					desc.info0);
		if (push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
			dev_kfree_skb_any(msdu);
			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
			continue;
		}

		rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
		rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
		rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
		rxcb->mac_id = mac_id;
		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
				      desc.info0);

		__skb_queue_tail(&msdu_list, msdu);

		/* Stop at the budget, but never in the middle of a
		 * multi-buffer MSDU (continuation set).
		 */
		if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
			done = true;
			break;
		}
	}

	/* Hw might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring we'll
	 * get rx_desc NULL. Give the read another try with updated cached
	 * head pointer so that we can reap complete MPDU in the current
	 * rx processing.
	 */
	if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
		ath11k_hal_srng_access_end(ab, srng);
		goto try_again;
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_msdu_reaped)
		goto exit;

	/* Refill each radio's rxdma ring with as many buffers as reaped. */
	for (i = 0; i < ab->num_radios; i++) {
		if (!num_buffs_reaped[i])
			continue;

		ar = ab->pdevs[i].ar;
		rx_ring = &ar->dp.rx_refill_buf_ring;

		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
					   HAL_RX_BUF_RBM_SW3_BM);
	}

	ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
					      &quota, ring_id);

exit:
	return budget - quota;
}
2686
/* Accumulate per-peer rx statistics from a parsed monitor-mode PPDU.
 * No-op if the station has no rx_stats allocated.
 */
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
					   struct hal_rx_mon_ppdu_info *ppdu_info)
{
	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
	u32 num_msdu;

	if (!rx_stats)
		return;

	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;

	rx_stats->num_msdu += num_msdu;
	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
				    ppdu_info->tcp_ack_msdu_count;
	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;

	/* 11a/11b PPDUs carry no nss/mcs/tid info; force sentinel values
	 * so the bounded counters below are skipped or bucketed last.
	 */
	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
		ppdu_info->nss = 1;
		ppdu_info->mcs = HAL_RX_MAX_MCS;
		ppdu_info->tid = IEEE80211_NUM_TIDS;
	}

	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;

	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;

	if (ppdu_info->gi < HAL_RX_GI_MAX)
		rx_stats->gi_count[ppdu_info->gi] += num_msdu;

	if (ppdu_info->bw < HAL_RX_BW_MAX)
		rx_stats->bw_count[ppdu_info->bw] += num_msdu;

	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;

	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
		rx_stats->tid_count[ppdu_info->tid] += num_msdu;

	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;

	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;

	if (ppdu_info->is_stbc)
		rx_stats->stbc_count += num_msdu;

	if (ppdu_info->beamformed)
		rx_stats->beamformed_count += num_msdu;

	if (ppdu_info->num_mpdu_fcs_ok > 1)
		rx_stats->ampdu_msdu_count += num_msdu;
	else
		rx_stats->non_ampdu_msdu_count += num_msdu;

	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
	rx_stats->dcm_count += ppdu_info->dcm;
	/* NOTE(review): ru_alloc is used as an index without a range check,
	 * unlike every other counter above — confirm ru_alloc is guaranteed
	 * to be < ARRAY_SIZE(ru_alloc_cnt) by the hal parser.
	 */
	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;

	arsta->rssi_comb = ppdu_info->rssi_comb;
	rx_stats->rx_duration += ppdu_info->rx_duration;
	arsta->rx_duration = rx_stats->rx_duration;
}
2756
/* Allocate, align and DMA-map one monitor status rx buffer, and
 * register it in the ring's idr.
 *
 * On success returns the skb with its dma address stored in the rxcb
 * and the idr slot written to *@buf_id; returns NULL on any failure
 * (allocation, mapping or idr exhaustion).
 */
static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
							 struct dp_rxdma_ring *rx_ring,
							 int *buf_id)
{
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
			    DP_RX_BUFFER_ALIGN_SIZE);

	if (!skb)
		goto fail_alloc_skb;

	/* Hardware requires the buffer start to be aligned; pull the
	 * data pointer up to the next alignment boundary if needed.
	 */
	if (!IS_ALIGNED((unsigned long)skb->data,
			DP_RX_BUFFER_ALIGN_SIZE)) {
		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
			 skb->data);
	}

	paddr = dma_map_single(ab->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ab->dev, paddr)))
		goto fail_free_skb;

	spin_lock_bh(&rx_ring->idr_lock);
	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
			    rx_ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&rx_ring->idr_lock);
	if (*buf_id < 0)
		goto fail_dma_unmap;

	ATH11K_SKB_RXCB(skb)->paddr = paddr;
	return skb;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
fail_alloc_skb:
	return NULL;
}
2800
/* Replenish the monitor status refill ring with up to @req_entries
 * freshly allocated buffers, bounded by ring capacity and free slots.
 *
 * Returns the number of buffers actually posted to the ring.
 */
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
					   struct dp_rxdma_ring *rx_ring,
					   int req_entries,
					   enum hal_rx_buf_return_buf_manager mgr)
{
	struct hal_srng *srng;
	u32 *desc;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	int buf_id;
	u32 cookie;
	dma_addr_t paddr;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	while (num_remain > 0) {
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id);
		if (!skb)
			break;
		paddr = ATH11K_SKB_RXCB(skb)->paddr;

		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_desc_get;

		/* Cookie lets the reap path map the descriptor back to
		 * the owning pdev and the idr buffer slot.
		 */
		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		num_remain--;

		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;

fail_desc_get:
	/* Ring full: undo the allocation of the last buffer. */
	spin_lock_bh(&rx_ring->idr_lock);
	idr_remove(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
2865
/* Reap completed monitor status buffers from the refill ring.
 *
 * For each ring entry (up to *@budget): unmap the completed buffer,
 * queue it on @skb_list if the hw marked it DONE, then immediately
 * post a replacement buffer into the same ring slot.  Returns the
 * number of ring entries consumed.
 */
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
					     int *budget, struct sk_buff_head *skb_list)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_srng *srng;
	void *rx_mon_status_desc;
	struct sk_buff *skb;
	struct ath11k_skb_rxcb *rxcb;
	struct hal_tlv_hdr *tlv;
	u32 cookie;
	int buf_id, srng_id;
	dma_addr_t paddr;
	u8 rbm;
	int num_buffs_reaped = 0;

	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
	dp = &ar->dp;
	srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);
	while (*budget) {
		*budget -= 1;
		/* Peek (don't advance) so the same slot can be refilled
		 * with a replacement buffer below.
		 */
		rx_mon_status_desc =
			ath11k_hal_srng_src_peek(ab, srng);
		if (!rx_mon_status_desc)
			break;

		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
						&cookie, &rbm);
		if (paddr) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

			spin_lock_bh(&rx_ring->idr_lock);
			skb = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!skb) {
				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
					    buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				goto move_next;
			}

			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(skb);

			dma_unmap_single(ab->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

			/* Only hand over buffers the hw finished writing. */
			tlv = (struct hal_tlv_hdr *)skb->data;
			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
					HAL_RX_STATUS_BUFFER_DONE) {
				ath11k_warn(ab, "mon status DONE not set %lx\n",
					    FIELD_GET(HAL_TLV_HDR_TAG,
						      tlv->tl));
				dev_kfree_skb_any(skb);
				goto move_next;
			}

			__skb_queue_tail(skb_list, skb);
		}
move_next:
		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
							&buf_id);

		if (!skb) {
			/* No replacement available; park a null entry. */
			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
							HAL_RX_BUF_RBM_SW3_BM);
			num_buffs_reaped++;
			break;
		}
		rxcb = ATH11K_SKB_RXCB(skb);

		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
						cookie, HAL_RX_BUF_RBM_SW3_BM);
		/* Slot refilled; now advance past it. */
		ath11k_hal_srng_src_get_next_entry(ab, srng);
		num_buffs_reaped++;
	}
	ath11k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);

	return num_buffs_reaped;
}
2960
/* Parse reaped monitor status buffers and update per-peer rx stats.
 *
 * Each buffer is run through the hal mon-status parser; on a complete
 * PPDU with a valid peer id, the corresponding station's statistics
 * are updated under ab->base_lock.  All buffers are consumed (freed).
 * Returns the number of status buffers reaped from the ring.
 */
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
				    struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	enum hal_rx_mon_status hal_status;
	struct sk_buff *skb;
	struct sk_buff_head skb_list;
	struct hal_rx_mon_ppdu_info ppdu_info;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int num_buffs_reaped = 0;

	__skb_queue_head_init(&skb_list);

	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
							     &skb_list);
	if (!num_buffs_reaped)
		goto exit;

	while ((skb = __skb_dequeue(&skb_list))) {
		memset(&ppdu_info, 0, sizeof(ppdu_info));
		ppdu_info.peer_id = HAL_INVALID_PEERID;

		if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);

		/* Only complete PPDUs with a resolved peer contribute stats. */
		if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rcu_read_lock();
		spin_lock_bh(&ab->base_lock);
		peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);

		if (!peer || !peer->sta) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "failed to find the peer with peer_id %d\n",
				   ppdu_info.peer_id);
			spin_unlock_bh(&ab->base_lock);
			rcu_read_unlock();
			dev_kfree_skb_any(skb);
			continue;
		}

		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
		ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);

		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
			trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);

		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
	}
exit:
	return num_buffs_reaped;
}
3023
3024static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3025{
3026        struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3027
3028        spin_lock_bh(&rx_tid->ab->base_lock);
3029        if (rx_tid->last_frag_no &&
3030            rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3031                spin_unlock_bh(&rx_tid->ab->base_lock);
3032                return;
3033        }
3034        ath11k_dp_rx_frags_cleanup(rx_tid, true);
3035        spin_unlock_bh(&rx_tid->ab->base_lock);
3036}
3037
3038int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3039{
3040        struct ath11k_base *ab = ar->ab;
3041        struct crypto_shash *tfm;
3042        struct ath11k_peer *peer;
3043        struct dp_rx_tid *rx_tid;
3044        int i;
3045
3046        tfm = crypto_alloc_shash("michael_mic", 0, 0);
3047        if (IS_ERR(tfm))
3048                return PTR_ERR(tfm);
3049
3050        spin_lock_bh(&ab->base_lock);
3051
3052        peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3053        if (!peer) {
3054                ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3055                spin_unlock_bh(&ab->base_lock);
3056                return -ENOENT;
3057        }
3058
3059        for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3060                rx_tid = &peer->rx_tid[i];
3061                rx_tid->ab = ab;
3062                timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3063                skb_queue_head_init(&rx_tid->rx_frags);
3064        }
3065
3066        peer->tfm_mmic = tfm;
3067        spin_unlock_bh(&ab->base_lock);
3068
3069        return 0;
3070}
3071
/* Compute the TKIP Michael MIC over @data using @tfm and the 8-byte
 * MIC key @key, writing the result to @mic.
 *
 * The MIC is computed over a 16-byte pseudo-header (DA, SA, TID/priority
 * byte plus zero padding) followed by the MSDU payload.  Returns 0 on
 * success or a negative crypto API error.
 */
static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
				      struct ieee80211_hdr *hdr, u8 *data,
				      size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 mic_hdr[16] = {0};
	u8 tid = 0;
	int ret;

	if (!tfm)
		return -EINVAL;

	desc->tfm = tfm;

	/* Michael MIC key is 8 bytes. */
	ret = crypto_shash_setkey(tfm, key, 8);
	if (ret)
		goto out;

	ret = crypto_shash_init(desc);
	if (ret)
		goto out;

	/* TKIP MIC header */
	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	mic_hdr[12] = tid;

	ret = crypto_shash_update(desc, mic_hdr, 16);
	if (ret)
		goto out;
	ret = crypto_shash_update(desc, data, data_len);
	if (ret)
		goto out;
	ret = crypto_shash_final(desc, mic);
out:
	/* Scrub key material from the on-stack descriptor. */
	shash_desc_zero(desc);
	return ret;
}
3112
/* Verify the Michael MIC on a reassembled TKIP MSDU.
 *
 * Returns 0 for non-TKIP frames or when the MIC matches.  On mismatch
 * the frame is reported to mac80211 with RX_FLAG_MMIC_ERROR (which
 * triggers countermeasures there) and -EINVAL is returned.
 */
static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
					  struct sk_buff *msdu)
{
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
	struct ieee80211_key_conf *key_conf;
	struct ieee80211_hdr *hdr;
	/* Michael MIC is 8 bytes; IEEE80211_CCMP_MIC_LEN happens to match. */
	u8 mic[IEEE80211_CCMP_MIC_LEN];
	int head_len, tail_len, ret;
	size_t data_len;
	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
	u8 *key, *data;
	u8 key_idx;

	if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
	    HAL_ENCRYPT_TYPE_TKIP_MIC)
		return 0;

	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;

	if (!is_multicast_ether_addr(hdr->addr1))
		key_idx = peer->ucast_keyidx;
	else
		key_idx = peer->mcast_keyidx;

	/* NOTE(review): key_conf is dereferenced below without a NULL
	 * check — confirm callers guarantee a key is installed for this
	 * key_idx whenever the enctype is TKIP.
	 */
	key_conf = peer->keys[key_idx];

	data = msdu->data + head_len;
	data_len = msdu->len - head_len - tail_len;
	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];

	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
		goto mic_fail;

	return 0;

mic_fail:
	(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
	(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;

	/* Report the MIC failure to mac80211 so TKIP countermeasures run. */
	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
	skb_pull(msdu, hal_rx_desc_sz);

	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
	ieee80211_rx(ar->hw, msdu);
	return -EINVAL;
}
3167
/* Strip crypto trailers/headers from one raw rx fragment according to
 * @flags (RX_FLAG_MIC_STRIPPED / RX_FLAG_ICV_STRIPPED / RX_FLAG_IV_STRIPPED).
 *
 * Trailers are trimmed from the tail first; when the IV must go, the
 * 802.11 header is slid forward over the IV and the skb head is pulled,
 * so the frame stays contiguous behind the HAL rx descriptor.
 */
static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
                                        enum hal_encrypt_type enctype, u32 flags)
{
        struct ieee80211_hdr *hdr;
        size_t hdr_len;
        size_t crypto_len;
        u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

        if (!flags)
                return;

        hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);

        if (flags & RX_FLAG_MIC_STRIPPED)
                skb_trim(msdu, msdu->len -
                         ath11k_dp_rx_crypto_mic_len(ar, enctype));

        if (flags & RX_FLAG_ICV_STRIPPED)
                skb_trim(msdu, msdu->len -
                         ath11k_dp_rx_crypto_icv_len(ar, enctype));

        if (flags & RX_FLAG_IV_STRIPPED) {
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);

                /* move the 802.11 header forward over the IV, then drop
                 * the now-unused leading bytes
                 */
                memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
                        (void *)msdu->data + hal_rx_desc_sz, hdr_len);
                skb_pull(msdu, crypto_len);
        }
}
3198
/* Reassemble the fragments queued on @rx_tid into a single MPDU.
 *
 * Each fragment keeps its crypto IV only on the first fragment and its
 * MIC/ICV only on the last; intermediate headers and the HAL descriptors
 * of the non-first fragments are stripped before the payloads are copied
 * into the first fragment's buffer (grown if needed).
 *
 * On success *@defrag_skb holds the assembled frame (NULL if TKIP MIC
 * verification failed and the frame was already passed to mac80211) and
 * the function returns 0; -ENOMEM if the head could not be expanded.
 */
static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
                                 struct ath11k_peer *peer,
                                 struct dp_rx_tid *rx_tid,
                                 struct sk_buff **defrag_skb)
{
        struct hal_rx_desc *rx_desc;
        struct sk_buff *skb, *first_frag, *last_frag;
        struct ieee80211_hdr *hdr;
        struct rx_attention *rx_attention;
        enum hal_encrypt_type enctype;
        bool is_decrypted = false;
        int msdu_len = 0;
        int extra_space;
        u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

        first_frag = skb_peek(&rx_tid->rx_frags);
        last_frag = skb_peek_tail(&rx_tid->rx_frags);

        skb_queue_walk(&rx_tid->rx_frags, skb) {
                flags = 0;
                rx_desc = (struct hal_rx_desc *)skb->data;
                hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);

                enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
                if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
                        rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
                        is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
                }

                /* keep IV on the first fragment and MIC/ICV on the last */
                if (is_decrypted) {
                        if (skb != first_frag)
                                flags |=  RX_FLAG_IV_STRIPPED;
                        if (skb != last_frag)
                                flags |= RX_FLAG_ICV_STRIPPED |
                                         RX_FLAG_MIC_STRIPPED;
                }

                /* RX fragments are always raw packets */
                if (skb != last_frag)
                        skb_trim(skb, skb->len - FCS_LEN);
                ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);

                /* non-first fragments contribute payload only */
                if (skb != first_frag)
                        skb_pull(skb, hal_rx_desc_sz +
                                      ieee80211_hdrlen(hdr->frame_control));
                msdu_len += skb->len;
        }

        extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
        if (extra_space > 0 &&
            (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
                return -ENOMEM;

        __skb_unlink(first_frag, &rx_tid->rx_frags);
        while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
                skb_put_data(first_frag, skb->data, skb->len);
                dev_kfree_skb_any(skb);
        }

        hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
        hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
        ATH11K_SKB_RXCB(first_frag)->is_frag = 1;

        if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
                first_frag = NULL;

        *defrag_skb = first_frag;
        return 0;
}
3268
/* Re-inject the reassembled MPDU @defrag_skb into the REO entrance ring so
 * the hardware re-runs PN checking and normal REO routing on it.
 *
 * The original destination-ring descriptor saved in @rx_tid is reused: its
 * link descriptor is rewritten to describe the single defragmented MSDU,
 * the skb is DMA-mapped and registered in the refill-buffer idr, and a
 * fresh entrance-ring entry pointing at that link descriptor is queued.
 *
 * Returns 0 on success; -ENOMEM on DMA-map/idr failure, -ENOSPC when the
 * reinject ring is full.  On error the idr entry and DMA mapping are
 * rolled back; @defrag_skb remains owned by the caller.
 */
static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
                                              struct sk_buff *defrag_skb)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
        struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
        struct hal_reo_entrance_ring *reo_ent_ring;
        struct hal_reo_dest_ring *reo_dest_ring;
        struct dp_link_desc_bank *link_desc_banks;
        struct hal_rx_msdu_link *msdu_link;
        struct hal_rx_msdu_details *msdu0;
        struct hal_srng *srng;
        dma_addr_t paddr;
        u32 desc_bank, msdu_info, mpdu_info;
        u32 dst_idx, cookie, hal_rx_desc_sz;
        int ret, buf_id;

        hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
        link_desc_banks = ab->dp.link_desc_banks;
        reo_dest_ring = rx_tid->dst_ring_desc;

        /* locate the link descriptor referenced by the saved dest-ring entry */
        ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
        msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
                        (paddr - link_desc_banks[desc_bank].paddr));
        msdu0 = &msdu_link->msdu_link[0];
        dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
        memset(msdu0, 0, sizeof(*msdu0));

        /* describe the defragmented frame as a single, complete MSDU */
        msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
                    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
                    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
                    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
                               defrag_skb->len - hal_rx_desc_sz) |
                    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
                    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
                    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
        msdu0->rx_msdu_info.info0 = msdu_info;

        /* change msdu len in hal rx desc */
        ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);

        paddr = dma_map_single(ab->dev, defrag_skb->data,
                               defrag_skb->len + skb_tailroom(defrag_skb),
                               DMA_FROM_DEVICE);
        if (dma_mapping_error(ab->dev, paddr))
                return -ENOMEM;

        /* register the skb so the normal rx path can find it by cookie */
        spin_lock_bh(&rx_refill_ring->idr_lock);
        buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
                           rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
        spin_unlock_bh(&rx_refill_ring->idr_lock);
        if (buf_id < 0) {
                ret = -ENOMEM;
                goto err_unmap_dma;
        }

        ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
        cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
                 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

        ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);

        /* Fill mpdu details into reo entrance ring */
        srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];

        spin_lock_bh(&srng->lock);
        ath11k_hal_srng_access_begin(ab, srng);

        reo_ent_ring = (struct hal_reo_entrance_ring *)
                        ath11k_hal_srng_src_get_next_entry(ab, srng);
        if (!reo_ent_ring) {
                ath11k_hal_srng_access_end(ab, srng);
                spin_unlock_bh(&srng->lock);
                ret = -ENOSPC;
                goto err_free_idr;
        }
        memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));

        ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
        ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
                                        HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);

        /* VALID_PN asks REO to re-run PN verification on the whole MPDU */
        mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
                    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
                    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
                    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
                    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
                    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
                    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);

        reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
        reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
        reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
        reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
                                         FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
                                                   reo_dest_ring->info0)) |
                              FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
        ath11k_hal_srng_access_end(ab, srng);
        spin_unlock_bh(&srng->lock);

        return 0;

err_free_idr:
        spin_lock_bh(&rx_refill_ring->idr_lock);
        idr_remove(&rx_refill_ring->bufs_idr, buf_id);
        spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
        dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
                         DMA_FROM_DEVICE);
        return ret;
}
3381
3382static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3383                                    struct sk_buff *a, struct sk_buff *b)
3384{
3385        int frag1, frag2;
3386
3387        frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3388        frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3389
3390        return frag1 - frag2;
3391}
3392
3393static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3394                                      struct sk_buff_head *frag_list,
3395                                      struct sk_buff *cur_frag)
3396{
3397        struct sk_buff *skb;
3398        int cmp;
3399
3400        skb_queue_walk(frag_list, skb) {
3401                cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3402                if (cmp < 0)
3403                        continue;
3404                __skb_queue_before(frag_list, skb, cur_frag);
3405                return;
3406        }
3407        __skb_queue_tail(frag_list, cur_frag);
3408}
3409
3410static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3411{
3412        struct ieee80211_hdr *hdr;
3413        u64 pn = 0;
3414        u8 *ehdr;
3415        u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3416
3417        hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3418        ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3419
3420        pn = ehdr[0];
3421        pn |= (u64)ehdr[1] << 8;
3422        pn |= (u64)ehdr[4] << 16;
3423        pn |= (u64)ehdr[5] << 24;
3424        pn |= (u64)ehdr[6] << 32;
3425        pn |= (u64)ehdr[7] << 40;
3426
3427        return pn;
3428}
3429
3430static bool
3431ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3432{
3433        enum hal_encrypt_type encrypt_type;
3434        struct sk_buff *first_frag, *skb;
3435        struct hal_rx_desc *desc;
3436        u64 last_pn;
3437        u64 cur_pn;
3438
3439        first_frag = skb_peek(&rx_tid->rx_frags);
3440        desc = (struct hal_rx_desc *)first_frag->data;
3441
3442        encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3443        if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3444            encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3445            encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3446            encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3447                return true;
3448
3449        last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3450        skb_queue_walk(&rx_tid->rx_frags, skb) {
3451                if (skb == first_frag)
3452                        continue;
3453
3454                cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3455                if (cur_pn != last_pn + 1)
3456                        return false;
3457                last_pn = cur_pn;
3458        }
3459        return true;
3460}
3461
3462static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3463                                    struct sk_buff *msdu,
3464                                    u32 *ring_desc)
3465{
3466        struct ath11k_base *ab = ar->ab;
3467        struct hal_rx_desc *rx_desc;
3468        struct ath11k_peer *peer;
3469        struct dp_rx_tid *rx_tid;
3470        struct sk_buff *defrag_skb = NULL;
3471        u32 peer_id;
3472        u16 seqno, frag_no;
3473        u8 tid;
3474        int ret = 0;
3475        bool more_frags;
3476        bool is_mcbc;
3477
3478        rx_desc = (struct hal_rx_desc *)msdu->data;
3479        peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
3480        tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
3481        seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
3482        frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
3483        more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
3484        is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
3485
3486        /* Multicast/Broadcast fragments are not expected */
3487        if (is_mcbc)
3488                return -EINVAL;
3489
3490        if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
3491            !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
3492            tid > IEEE80211_NUM_TIDS)
3493                return -EINVAL;
3494
3495        /* received unfragmented packet in reo
3496         * exception ring, this shouldn't happen
3497         * as these packets typically come from
3498         * reo2sw srngs.
3499         */
3500        if (WARN_ON_ONCE(!frag_no && !more_frags))
3501                return -EINVAL;
3502
3503        spin_lock_bh(&ab->base_lock);
3504        peer = ath11k_peer_find_by_id(ab, peer_id);
3505        if (!peer) {
3506                ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3507                            peer_id);
3508                ret = -ENOENT;
3509                goto out_unlock;
3510        }
3511        rx_tid = &peer->rx_tid[tid];
3512
3513        if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3514            skb_queue_empty(&rx_tid->rx_frags)) {
3515                /* Flush stored fragments and start a new sequence */
3516                ath11k_dp_rx_frags_cleanup(rx_tid, true);
3517                rx_tid->cur_sn = seqno;
3518        }
3519
3520        if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3521                /* Fragment already present */
3522                ret = -EINVAL;
3523                goto out_unlock;
3524        }
3525
3526        if (frag_no > __fls(rx_tid->rx_frag_bitmap))
3527                __skb_queue_tail(&rx_tid->rx_frags, msdu);
3528        else
3529                ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
3530
3531        rx_tid->rx_frag_bitmap |= BIT(frag_no);
3532        if (!more_frags)
3533                rx_tid->last_frag_no = frag_no;
3534
3535        if (frag_no == 0) {
3536                rx_tid->dst_ring_desc = kmemdup(ring_desc,
3537                                                sizeof(*rx_tid->dst_ring_desc),
3538                                                GFP_ATOMIC);
3539                if (!rx_tid->dst_ring_desc) {
3540                        ret = -ENOMEM;
3541                        goto out_unlock;
3542                }
3543        } else {
3544                ath11k_dp_rx_link_desc_return(ab, ring_desc,
3545                                              HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3546        }
3547
3548        if (!rx_tid->last_frag_no ||
3549            rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3550                mod_timer(&rx_tid->frag_timer, jiffies +
3551                                               ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3552                goto out_unlock;
3553        }
3554
3555        spin_unlock_bh(&ab->base_lock);
3556        del_timer_sync(&rx_tid->frag_timer);
3557        spin_lock_bh(&ab->base_lock);
3558
3559        peer = ath11k_peer_find_by_id(ab, peer_id);
3560        if (!peer)
3561                goto err_frags_cleanup;
3562
3563        if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3564                goto err_frags_cleanup;
3565
3566        if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3567                goto err_frags_cleanup;
3568
3569        if (!defrag_skb)
3570                goto err_frags_cleanup;
3571
3572        if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3573                goto err_frags_cleanup;
3574
3575        ath11k_dp_rx_frags_cleanup(rx_tid, false);
3576        goto out_unlock;
3577
3578err_frags_cleanup:
3579        dev_kfree_skb_any(defrag_skb);
3580        ath11k_dp_rx_frags_cleanup(rx_tid, true);
3581out_unlock:
3582        spin_unlock_bh(&ab->base_lock);
3583        return ret;
3584}
3585
/* Reclaim one rx buffer referenced from an error-path link descriptor.
 *
 * Looks the skb up by @buf_id in the refill-buffer idr, unmaps its DMA,
 * and either frees it (@drop, pdev inactive, CAC running, or bogus msdu
 * length) or feeds it to the fragment handler.  If fragment handling
 * fails, the skb is freed and the link descriptor returned to the WBM
 * idle list.
 *
 * Returns 0 when the buffer was consumed (so the caller counts it for
 * replenishing), -EINVAL when @buf_id was not found.
 */
static int
ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
{
        struct ath11k_pdev_dp *dp = &ar->dp;
        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
        struct sk_buff *msdu;
        struct ath11k_skb_rxcb *rxcb;
        struct hal_rx_desc *rx_desc;
        u8 *hdr_status;
        u16 msdu_len;
        u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

        spin_lock_bh(&rx_ring->idr_lock);
        msdu = idr_find(&rx_ring->bufs_idr, buf_id);
        if (!msdu) {
                ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
                            buf_id);
                spin_unlock_bh(&rx_ring->idr_lock);
                return -EINVAL;
        }

        idr_remove(&rx_ring->bufs_idr, buf_id);
        spin_unlock_bh(&rx_ring->idr_lock);

        rxcb = ATH11K_SKB_RXCB(msdu);
        dma_unmap_single(ar->ab->dev, rxcb->paddr,
                         msdu->len + skb_tailroom(msdu),
                         DMA_FROM_DEVICE);

        if (drop) {
                dev_kfree_skb_any(msdu);
                return 0;
        }

        rcu_read_lock();
        if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
                dev_kfree_skb_any(msdu);
                goto exit;
        }

        if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
                dev_kfree_skb_any(msdu);
                goto exit;
        }

        /* sanity-check the HW-reported length against the buffer size */
        rx_desc = (struct hal_rx_desc *)msdu->data;
        msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
        if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
                hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
                ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
                ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
                                sizeof(struct ieee80211_hdr));
                ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
                                sizeof(struct hal_rx_desc));
                dev_kfree_skb_any(msdu);
                goto exit;
        }

        skb_put(msdu, hal_rx_desc_sz + msdu_len);

        if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
                dev_kfree_skb_any(msdu);
                ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
                                              HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
        }
exit:
        rcu_read_unlock();
        return 0;
}
3655
3656int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3657                             int budget)
3658{
3659        u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3660        struct dp_link_desc_bank *link_desc_banks;
3661        enum hal_rx_buf_return_buf_manager rbm;
3662        int tot_n_bufs_reaped, quota, ret, i;
3663        int n_bufs_reaped[MAX_RADIOS] = {0};
3664        struct dp_rxdma_ring *rx_ring;
3665        struct dp_srng *reo_except;
3666        u32 desc_bank, num_msdus;
3667        struct hal_srng *srng;
3668        struct ath11k_dp *dp;
3669        void *link_desc_va;
3670        int buf_id, mac_id;
3671        struct ath11k *ar;
3672        dma_addr_t paddr;
3673        u32 *desc;
3674        bool is_frag;
3675        u8 drop = 0;
3676
3677        tot_n_bufs_reaped = 0;
3678        quota = budget;
3679
3680        dp = &ab->dp;
3681        reo_except = &dp->reo_except_ring;
3682        link_desc_banks = dp->link_desc_banks;
3683
3684        srng = &ab->hal.srng_list[reo_except->ring_id];
3685
3686        spin_lock_bh(&srng->lock);
3687
3688        ath11k_hal_srng_access_begin(ab, srng);
3689
3690        while (budget &&
3691               (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3692                struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3693
3694                ab->soc_stats.err_ring_pkts++;
3695                ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3696                                                    &desc_bank);
3697                if (ret) {
3698                        ath11k_warn(ab, "failed to parse error reo desc %d\n",
3699                                    ret);
3700                        continue;
3701                }
3702                link_desc_va = link_desc_banks[desc_bank].vaddr +
3703                               (paddr - link_desc_banks[desc_bank].paddr);
3704                ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3705                                                 &rbm);
3706                if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3707                    rbm != HAL_RX_BUF_RBM_SW3_BM) {
3708                        ab->soc_stats.invalid_rbm++;
3709                        ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3710                        ath11k_dp_rx_link_desc_return(ab, desc,
3711                                                      HAL_WBM_REL_BM_ACT_REL_MSDU);
3712                        continue;
3713                }
3714
3715                is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3716
3717                /* Process only rx fragments with one msdu per link desc below, and drop
3718                 * msdu's indicated due to error reasons.
3719                 */
3720                if (!is_frag || num_msdus > 1) {
3721                        drop = 1;
3722                        /* Return the link desc back to wbm idle list */
3723                        ath11k_dp_rx_link_desc_return(ab, desc,
3724                                                      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3725                }
3726
3727                for (i = 0; i < num_msdus; i++) {
3728                        buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3729                                           msdu_cookies[i]);
3730
3731                        mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3732                                           msdu_cookies[i]);
3733
3734                        ar = ab->pdevs[mac_id].ar;
3735
3736                        if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3737                                n_bufs_reaped[mac_id]++;
3738                                tot_n_bufs_reaped++;
3739                        }
3740                }
3741
3742                if (tot_n_bufs_reaped >= quota) {
3743                        tot_n_bufs_reaped = quota;
3744                        goto exit;
3745                }
3746
3747                budget = quota - tot_n_bufs_reaped;
3748        }
3749
3750exit:
3751        ath11k_hal_srng_access_end(ab, srng);
3752
3753        spin_unlock_bh(&srng->lock);
3754
3755        for (i = 0; i <  ab->num_radios; i++) {
3756                if (!n_bufs_reaped[i])
3757                        continue;
3758
3759                ar = ab->pdevs[i].ar;
3760                rx_ring = &ar->dp.rx_refill_buf_ring;
3761
3762                ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3763                                           HAL_RX_BUF_RBM_SW3_BM);
3764        }
3765
3766        return tot_n_bufs_reaped;
3767}
3768
3769static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3770                                             int msdu_len,
3771                                             struct sk_buff_head *msdu_list)
3772{
3773        struct sk_buff *skb, *tmp;
3774        struct ath11k_skb_rxcb *rxcb;
3775        int n_buffs;
3776
3777        n_buffs = DIV_ROUND_UP(msdu_len,
3778                               (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3779
3780        skb_queue_walk_safe(msdu_list, skb, tmp) {
3781                rxcb = ATH11K_SKB_RXCB(skb);
3782                if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3783                    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3784                        if (!n_buffs)
3785                                break;
3786                        __skb_unlink(skb, msdu_list);
3787                        dev_kfree_skb_any(skb);
3788                        n_buffs--;
3789                }
3790        }
3791}
3792
/* Recover an MSDU that hit a NULL REO queue descriptor so it can still be
 * delivered to mac80211.
 *
 * Oversized (scatter-gather) frames are rejected and their continuation
 * buffers purged; otherwise the skb is resized around the payload, rx
 * status is filled in and the normal mpdu handling applied.
 *
 * Returns 0 when the frame is ready for delivery, -EINVAL/-EIO when the
 * caller should drop it.
 */
static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
                                      struct ieee80211_rx_status *status,
                                      struct sk_buff_head *msdu_list)
{
        u16 msdu_len;
        struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
        struct rx_attention *rx_attention;
        u8 l3pad_bytes;
        struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
        u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;

        msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);

        if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
                /* First buffer will be freed by the caller, so deduct it's length */
                msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
                ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
                return -EINVAL;
        }

        rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
        if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
                ath11k_warn(ar->ab,
                            "msdu_done bit not set in null_q_des processing\n");
                __skb_queue_purge(msdu_list);
                return -EIO;
        }

        /* Handle NULL queue descriptor violations arising out a missing
         * REO queue for a given peer or a given TID. This typically
         * may happen if a packet is received on a QOS enabled TID before the
         * ADDBA negotiation for that TID, when the TID queue is setup. Or
         * it may also happen for MC/BC frames if they are not routed to the
         * non-QOS TID queue, in the absence of any other default TID queue.
         * This error can show up both in a REO destination or WBM release ring.
         */

        rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
        rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);

        if (rxcb->is_frag) {
                /* fragments carry no l3 padding; just drop the HAL descriptor */
                skb_pull(msdu, hal_rx_desc_sz);
        } else {
                l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);

                if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
                        return -EINVAL;

                skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
                skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
        }
        ath11k_dp_rx_h_ppdu(ar, desc, status);

        ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);

        rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);

        /* Please note that caller will having the access to msdu and completing
         * rx with mac80211. Need not worry about cleaning up amsdu_list.
         */

        return 0;
}
3856
3857static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3858                                   struct ieee80211_rx_status *status,
3859                                   struct sk_buff_head *msdu_list)
3860{
3861        struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3862        bool drop = false;
3863
3864        ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3865
3866        switch (rxcb->err_code) {
3867        case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3868                if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3869                        drop = true;
3870                break;
3871        case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3872                /* TODO: Do not drop PN failed packets in the driver;
3873                 * instead, it is good to drop such packets in mac80211
3874                 * after incrementing the replay counters.
3875                 */
3876                fallthrough;
3877        default:
3878                /* TODO: Review other errors and process them to mac80211
3879                 * as appropriate.
3880                 */
3881                drop = true;
3882                break;
3883        }
3884
3885        return drop;
3886}
3887
3888static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
3889                                        struct ieee80211_rx_status *status)
3890{
3891        u16 msdu_len;
3892        struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3893        u8 l3pad_bytes;
3894        struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3895        u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3896
3897        rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
3898        rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
3899
3900        l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
3901        msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3902        skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3903        skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3904
3905        ath11k_dp_rx_h_ppdu(ar, desc, status);
3906
3907        status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3908                         RX_FLAG_DECRYPTED);
3909
3910        ath11k_dp_rx_h_undecap(ar, msdu, desc,
3911                               HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
3912}
3913
3914static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar,  struct sk_buff *msdu,
3915                                     struct ieee80211_rx_status *status)
3916{
3917        struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3918        bool drop = false;
3919
3920        ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
3921
3922        switch (rxcb->err_code) {
3923        case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3924                ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
3925                break;
3926        default:
3927                /* TODO: Review other rxdma error code to check if anything is
3928                 * worth reporting to mac80211
3929                 */
3930                drop = true;
3931                break;
3932        }
3933
3934        return drop;
3935}
3936
3937static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
3938                                 struct napi_struct *napi,
3939                                 struct sk_buff *msdu,
3940                                 struct sk_buff_head *msdu_list)
3941{
3942        struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3943        struct ieee80211_rx_status rxs = {0};
3944        struct ieee80211_rx_status *status;
3945        bool drop = true;
3946
3947        switch (rxcb->err_rel_src) {
3948        case HAL_WBM_REL_SRC_MODULE_REO:
3949                drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
3950                break;
3951        case HAL_WBM_REL_SRC_MODULE_RXDMA:
3952                drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
3953                break;
3954        default:
3955                /* msdu will get freed */
3956                break;
3957        }
3958
3959        if (drop) {
3960                dev_kfree_skb_any(msdu);
3961                return;
3962        }
3963
3964        status = IEEE80211_SKB_RXCB(msdu);
3965        *status = rxs;
3966
3967        ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
3968}
3969
/* Reap up to @budget error frames from the WBM release ring, sort them
 * into per-pdev lists, replenish the rx buffers that were consumed and
 * then process each error frame (deliver to mac80211 or drop).
 *
 * Returns the number of buffers reaped from the ring.
 */
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
                                 struct napi_struct *napi, int budget)
{
        struct ath11k *ar;
        struct ath11k_dp *dp = &ab->dp;
        struct dp_rxdma_ring *rx_ring;
        struct hal_rx_wbm_rel_info err_info;
        struct hal_srng *srng;
        struct sk_buff *msdu;
        struct sk_buff_head msdu_list[MAX_RADIOS];
        struct ath11k_skb_rxcb *rxcb;
        u32 *rx_desc;
        int buf_id, mac_id;
        int num_buffs_reaped[MAX_RADIOS] = {0};
        int total_num_buffs_reaped = 0;
        int ret, i;

        for (i = 0; i < ab->num_radios; i++)
                __skb_queue_head_init(&msdu_list[i]);

        srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];

        /* Ring access is serialized by the srng lock */
        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        while (budget) {
                rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
                if (!rx_desc)
                        break;

                ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
                if (ret) {
                        ath11k_warn(ab,
                                    "failed to parse rx error in wbm_rel ring desc %d\n",
                                    ret);
                        continue;
                }

                /* The cookie encodes both the owning pdev and the buffer id
                 * within that pdev's refill ring.
                 */
                buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
                mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);

                ar = ab->pdevs[mac_id].ar;
                rx_ring = &ar->dp.rx_refill_buf_ring;

                spin_lock_bh(&rx_ring->idr_lock);
                msdu = idr_find(&rx_ring->bufs_idr, buf_id);
                if (!msdu) {
                        ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
                                    buf_id, mac_id);
                        spin_unlock_bh(&rx_ring->idr_lock);
                        continue;
                }

                idr_remove(&rx_ring->bufs_idr, buf_id);
                spin_unlock_bh(&rx_ring->idr_lock);

                rxcb = ATH11K_SKB_RXCB(msdu);
                dma_unmap_single(ab->dev, rxcb->paddr,
                                 msdu->len + skb_tailroom(msdu),
                                 DMA_FROM_DEVICE);

                num_buffs_reaped[mac_id]++;
                total_num_buffs_reaped++;
                budget--;

                /* Only buffers pushed because an error was detected carry a
                 * frame worth examining; anything else is simply freed.
                 */
                if (err_info.push_reason !=
                    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
                        dev_kfree_skb_any(msdu);
                        continue;
                }

                /* Stash the error source/code for the per-msdu handlers */
                rxcb->err_rel_src = err_info.err_rel_src;
                rxcb->err_code = err_info.err_code;
                rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
                __skb_queue_tail(&msdu_list[mac_id], msdu);
        }

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        if (!total_num_buffs_reaped)
                goto done;

        /* Refill each pdev's ring with as many buffers as were reaped */
        for (i = 0; i <  ab->num_radios; i++) {
                if (!num_buffs_reaped[i])
                        continue;

                ar = ab->pdevs[i].ar;
                rx_ring = &ar->dp.rx_refill_buf_ring;

                ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
                                           HAL_RX_BUF_RBM_SW3_BM);
        }

        rcu_read_lock();
        for (i = 0; i <  ab->num_radios; i++) {
                /* Drop everything queued for pdevs that are inactive or in
                 * a CAC (channel availability check) period.
                 */
                if (!rcu_dereference(ab->pdevs_active[i])) {
                        __skb_queue_purge(&msdu_list[i]);
                        continue;
                }

                ar = ab->pdevs[i].ar;

                if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
                        __skb_queue_purge(&msdu_list[i]);
                        continue;
                }

                while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
                        ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
        }
        rcu_read_unlock();
done:
        return total_num_buffs_reaped;
}
4087
4088int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
4089{
4090        struct ath11k *ar;
4091        struct dp_srng *err_ring;
4092        struct dp_rxdma_ring *rx_ring;
4093        struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
4094        struct hal_srng *srng;
4095        u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
4096        enum hal_rx_buf_return_buf_manager rbm;
4097        enum hal_reo_entr_rxdma_ecode rxdma_err_code;
4098        struct ath11k_skb_rxcb *rxcb;
4099        struct sk_buff *skb;
4100        struct hal_reo_entrance_ring *entr_ring;
4101        void *desc;
4102        int num_buf_freed = 0;
4103        int quota = budget;
4104        dma_addr_t paddr;
4105        u32 desc_bank;
4106        void *link_desc_va;
4107        int num_msdus;
4108        int i;
4109        int buf_id;
4110
4111        ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
4112        err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
4113                                                                          mac_id)];
4114        rx_ring = &ar->dp.rx_refill_buf_ring;
4115
4116        srng = &ab->hal.srng_list[err_ring->ring_id];
4117
4118        spin_lock_bh(&srng->lock);
4119
4120        ath11k_hal_srng_access_begin(ab, srng);
4121
4122        while (quota-- &&
4123               (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4124                ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
4125
4126                entr_ring = (struct hal_reo_entrance_ring *)desc;
4127                rxdma_err_code =
4128                        FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4129                                  entr_ring->info1);
4130                ab->soc_stats.rxdma_error[rxdma_err_code]++;
4131
4132                link_desc_va = link_desc_banks[desc_bank].vaddr +
4133                               (paddr - link_desc_banks[desc_bank].paddr);
4134                ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
4135                                                 msdu_cookies, &rbm);
4136
4137                for (i = 0; i < num_msdus; i++) {
4138                        buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4139                                           msdu_cookies[i]);
4140
4141                        spin_lock_bh(&rx_ring->idr_lock);
4142                        skb = idr_find(&rx_ring->bufs_idr, buf_id);
4143                        if (!skb) {
4144                                ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
4145                                            buf_id);
4146                                spin_unlock_bh(&rx_ring->idr_lock);
4147                                continue;
4148                        }
4149
4150                        idr_remove(&rx_ring->bufs_idr, buf_id);
4151                        spin_unlock_bh(&rx_ring->idr_lock);
4152
4153                        rxcb = ATH11K_SKB_RXCB(skb);
4154                        dma_unmap_single(ab->dev, rxcb->paddr,
4155                                         skb->len + skb_tailroom(skb),
4156                                         DMA_FROM_DEVICE);
4157                        dev_kfree_skb_any(skb);
4158
4159                        num_buf_freed++;
4160                }
4161
4162                ath11k_dp_rx_link_desc_return(ab, desc,
4163                                              HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4164        }
4165
4166        ath11k_hal_srng_access_end(ab, srng);
4167
4168        spin_unlock_bh(&srng->lock);
4169
4170        if (num_buf_freed)
4171                ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
4172                                           HAL_RX_BUF_RBM_SW3_BM);
4173
4174        return budget - quota;
4175}
4176
/* Drain the REO status ring, parse each status descriptor by its TLV tag
 * and, when the status answers a pending REO command, unlink that command
 * from dp->reo_cmd_list and invoke its completion handler.
 */
void ath11k_dp_process_reo_status(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct hal_srng *srng;
        struct dp_reo_cmd *cmd, *tmp;
        bool found = false;
        u32 *reo_desc;
        u16 tag;
        struct hal_reo_status reo_status;

        srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

        memset(&reo_status, 0, sizeof(reo_status));

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
                tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);

                /* Each parser fills reo_status, including the uniform
                 * header (cmd_num/cmd_status) used for matching below.
                 */
                switch (tag) {
                case HAL_REO_GET_QUEUE_STATS_STATUS:
                        ath11k_hal_reo_status_queue_stats(ab, reo_desc,
                                                          &reo_status);
                        break;
                case HAL_REO_FLUSH_QUEUE_STATUS:
                        ath11k_hal_reo_flush_queue_status(ab, reo_desc,
                                                          &reo_status);
                        break;
                case HAL_REO_FLUSH_CACHE_STATUS:
                        ath11k_hal_reo_flush_cache_status(ab, reo_desc,
                                                          &reo_status);
                        break;
                case HAL_REO_UNBLOCK_CACHE_STATUS:
                        ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
                                                          &reo_status);
                        break;
                case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
                        ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
                                                                 &reo_status);
                        break;
                case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
                        ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
                                                                  &reo_status);
                        break;
                case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
                        ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
                                                                  &reo_status);
                        break;
                default:
                        ath11k_warn(ab, "Unknown reo status type %d\n", tag);
                        continue;
                }

                /* Match the status to a queued command by command number */
                spin_lock_bh(&dp->reo_cmd_lock);
                list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
                        if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
                                found = true;
                                list_del(&cmd->list);
                                break;
                        }
                }
                spin_unlock_bh(&dp->reo_cmd_lock);

                /* The handler runs without reo_cmd_lock held; cmd was
                 * already unlinked above so nothing else can reach it.
                 */
                if (found) {
                        cmd->handler(dp, (void *)&cmd->data,
                                     reo_status.uniform_hdr.cmd_status);
                        kfree(cmd);
                }

                found = false;
        }

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);
}
4255
/* Release a pdev's rx resources: its rx srngs and the buffers still held
 * by the rxdma rings. Counterpart of ath11k_dp_rx_pdev_alloc().
 */
void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
{
        struct ath11k *ar = ab->pdevs[mac_id].ar;

        ath11k_dp_rx_pdev_srng_free(ar);
        ath11k_dp_rxdma_pdev_buf_free(ar);
}
4263
4264int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4265{
4266        struct ath11k *ar = ab->pdevs[mac_id].ar;
4267        struct ath11k_pdev_dp *dp = &ar->dp;
4268        u32 ring_id;
4269        int i;
4270        int ret;
4271
4272        ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4273        if (ret) {
4274                ath11k_warn(ab, "failed to setup rx srngs\n");
4275                return ret;
4276        }
4277
4278        ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4279        if (ret) {
4280                ath11k_warn(ab, "failed to setup rxdma ring\n");
4281                return ret;
4282        }
4283
4284        ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4285        ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4286        if (ret) {
4287                ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4288                            ret);
4289                return ret;
4290        }
4291
4292        if (ab->hw_params.rx_mac_buf_ring) {
4293                for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4294                        ring_id = dp->rx_mac_buf_ring[i].ring_id;
4295                        ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4296                                                          mac_id + i, HAL_RXDMA_BUF);
4297                        if (ret) {
4298                                ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4299                                            i, ret);
4300                                return ret;
4301                        }
4302                }
4303        }
4304
4305        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4306                ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4307                ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4308                                                  mac_id + i, HAL_RXDMA_DST);
4309                if (ret) {
4310                        ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4311                                    i, ret);
4312                        return ret;
4313                }
4314        }
4315
4316        if (!ab->hw_params.rxdma1_enable)
4317                goto config_refill_ring;
4318
4319        ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4320        ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4321                                          mac_id, HAL_RXDMA_MONITOR_BUF);
4322        if (ret) {
4323                ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4324                            ret);
4325                return ret;
4326        }
4327        ret = ath11k_dp_tx_htt_srng_setup(ab,
4328                                          dp->rxdma_mon_dst_ring.ring_id,
4329                                          mac_id, HAL_RXDMA_MONITOR_DST);
4330        if (ret) {
4331                ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4332                            ret);
4333                return ret;
4334        }
4335        ret = ath11k_dp_tx_htt_srng_setup(ab,
4336                                          dp->rxdma_mon_desc_ring.ring_id,
4337                                          mac_id, HAL_RXDMA_MONITOR_DESC);
4338        if (ret) {
4339                ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4340                            ret);
4341                return ret;
4342        }
4343
4344config_refill_ring:
4345        for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4346                ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4347                ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4348                                                  HAL_RXDMA_MONITOR_STATUS);
4349                if (ret) {
4350                        ath11k_warn(ab,
4351                                    "failed to configure mon_status_refill_ring%d %d\n",
4352                                    i, ret);
4353                        return ret;
4354                }
4355        }
4356
4357        return 0;
4358}
4359
4360static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4361{
4362        if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4363                *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4364                *total_len -= *frag_len;
4365        } else {
4366                *frag_len = *total_len;
4367                *total_len = 0;
4368        }
4369}
4370
4371static
4372int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4373                                          void *p_last_buf_addr_info,
4374                                          u8 mac_id)
4375{
4376        struct ath11k_pdev_dp *dp = &ar->dp;
4377        struct dp_srng *dp_srng;
4378        void *hal_srng;
4379        void *src_srng_desc;
4380        int ret = 0;
4381
4382        if (ar->ab->hw_params.rxdma1_enable) {
4383                dp_srng = &dp->rxdma_mon_desc_ring;
4384                hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4385        } else {
4386                dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4387                hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4388        }
4389
4390        ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4391
4392        src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4393
4394        if (src_srng_desc) {
4395                struct ath11k_buffer_addr *src_desc =
4396                                (struct ath11k_buffer_addr *)src_srng_desc;
4397
4398                *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4399        } else {
4400                ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4401                           "Monitor Link Desc Ring %d Full", mac_id);
4402                ret = -ENOMEM;
4403        }
4404
4405        ath11k_hal_srng_access_end(ar->ab, hal_srng);
4406        return ret;
4407}
4408
4409static
4410void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4411                                         dma_addr_t *paddr, u32 *sw_cookie,
4412                                         u8 *rbm,
4413                                         void **pp_buf_addr_info)
4414{
4415        struct hal_rx_msdu_link *msdu_link =
4416                        (struct hal_rx_msdu_link *)rx_msdu_link_desc;
4417        struct ath11k_buffer_addr *buf_addr_info;
4418
4419        buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4420
4421        ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4422
4423        *pp_buf_addr_info = (void *)buf_addr_info;
4424}
4425
4426static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4427{
4428        if (skb->len > len) {
4429                skb_trim(skb, len);
4430        } else {
4431                if (skb_tailroom(skb) < len - skb->len) {
4432                        if ((pskb_expand_head(skb, 0,
4433                                              len - skb->len - skb_tailroom(skb),
4434                                              GFP_ATOMIC))) {
4435                                dev_kfree_skb_any(skb);
4436                                return -ENOMEM;
4437                        }
4438                }
4439                skb_put(skb, (len - skb->len));
4440        }
4441        return 0;
4442}
4443
4444static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4445                                        void *msdu_link_desc,
4446                                        struct hal_rx_msdu_list *msdu_list,
4447                                        u16 *num_msdus)
4448{
4449        struct hal_rx_msdu_details *msdu_details = NULL;
4450        struct rx_msdu_desc *msdu_desc_info = NULL;
4451        struct hal_rx_msdu_link *msdu_link = NULL;
4452        int i;
4453        u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4454        u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4455        u8  tmp  = 0;
4456
4457        msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
4458        msdu_details = &msdu_link->msdu_link[0];
4459
4460        for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4461                if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4462                              msdu_details[i].buf_addr_info.info0) == 0) {
4463                        msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4464                        msdu_desc_info->info0 |= last;
4465                        ;
4466                        break;
4467                }
4468                msdu_desc_info = &msdu_details[i].rx_msdu_info;
4469
4470                if (!i)
4471                        msdu_desc_info->info0 |= first;
4472                else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4473                        msdu_desc_info->info0 |= last;
4474                msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4475                msdu_list->msdu_info[i].msdu_len =
4476                         HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4477                msdu_list->sw_cookie[i] =
4478                        FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4479                                  msdu_details[i].buf_addr_info.info1);
4480                tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4481                                msdu_details[i].buf_addr_info.info1);
4482                msdu_list->rbm[i] = tmp;
4483        }
4484        *num_msdus = i;
4485}
4486
4487static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4488                                        u32 *rx_bufs_used)
4489{
4490        u32 ret = 0;
4491
4492        if ((*ppdu_id < msdu_ppdu_id) &&
4493            ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4494                *ppdu_id = msdu_ppdu_id;
4495                ret = msdu_ppdu_id;
4496        } else if ((*ppdu_id > msdu_ppdu_id) &&
4497                ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4498                /* mon_dst is behind than mon_status
4499                 * skip dst_ring and free it
4500                 */
4501                *rx_bufs_used += 1;
4502                *ppdu_id = msdu_ppdu_id;
4503                ret = msdu_ppdu_id;
4504        }
4505        return ret;
4506}
4507
4508static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4509                                      bool *is_frag, u32 *total_len,
4510                                      u32 *frag_len, u32 *msdu_cnt)
4511{
4512        if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4513                if (!*is_frag) {
4514                        *total_len = info->msdu_len;
4515                        *is_frag = true;
4516                }
4517                ath11k_dp_mon_set_frag_len(total_len,
4518                                           frag_len);
4519        } else {
4520                if (*is_frag) {
4521                        ath11k_dp_mon_set_frag_len(total_len,
4522                                                   frag_len);
4523                } else {
4524                        *frag_len = info->msdu_len;
4525                }
4526                *is_frag = false;
4527                *msdu_cnt -= 1;
4528        }
4529}
4530
4531static u32
4532ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4533                          void *ring_entry, struct sk_buff **head_msdu,
4534                          struct sk_buff **tail_msdu, u32 *npackets,
4535                          u32 *ppdu_id)
4536{
4537        struct ath11k_pdev_dp *dp = &ar->dp;
4538        struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4539        struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4540        struct sk_buff *msdu = NULL, *last = NULL;
4541        struct hal_rx_msdu_list msdu_list;
4542        void *p_buf_addr_info, *p_last_buf_addr_info;
4543        struct hal_rx_desc *rx_desc;
4544        void *rx_msdu_link_desc;
4545        dma_addr_t paddr;
4546        u16 num_msdus = 0;
4547        u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4548        u32 rx_bufs_used = 0, i = 0;
4549        u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4550        u32 total_len = 0, frag_len = 0;
4551        bool is_frag, is_first_msdu;
4552        bool drop_mpdu = false;
4553        struct ath11k_skb_rxcb *rxcb;
4554        struct hal_reo_entrance_ring *ent_desc =
4555                        (struct hal_reo_entrance_ring *)ring_entry;
4556        int buf_id;
4557        u32 rx_link_buf_info[2];
4558        u8 rbm;
4559
4560        if (!ar->ab->hw_params.rxdma1_enable)
4561                rx_ring = &dp->rx_refill_buf_ring;
4562
4563        ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4564                                            &sw_cookie,
4565                                            &p_last_buf_addr_info, &rbm,
4566                                            &msdu_cnt);
4567
4568        if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4569                      ent_desc->info1) ==
4570                      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4571                u8 rxdma_err =
4572                        FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4573                                  ent_desc->info1);
4574                if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4575                    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4576                    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4577                        drop_mpdu = true;
4578                        pmon->rx_mon_stats.dest_mpdu_drop++;
4579                }
4580        }
4581
4582        is_frag = false;
4583        is_first_msdu = true;
4584
4585        do {
4586                if (pmon->mon_last_linkdesc_paddr == paddr) {
4587                        pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4588                        return rx_bufs_used;
4589                }
4590
4591                if (ar->ab->hw_params.rxdma1_enable)
4592                        rx_msdu_link_desc =
4593                                (void *)pmon->link_desc_banks[sw_cookie].vaddr +
4594                                (paddr - pmon->link_desc_banks[sw_cookie].paddr);
4595                else
4596                        rx_msdu_link_desc =
4597                                (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4598                                (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4599
4600                ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4601                                            &num_msdus);
4602
4603                for (i = 0; i < num_msdus; i++) {
4604                        u32 l2_hdr_offset;
4605
4606                        if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4607                                ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4608                                           "i %d last_cookie %d is same\n",
4609                                           i, pmon->mon_last_buf_cookie);
4610                                drop_mpdu = true;
4611                                pmon->rx_mon_stats.dup_mon_buf_cnt++;
4612                                continue;
4613                        }
4614                        buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4615                                           msdu_list.sw_cookie[i]);
4616
4617                        spin_lock_bh(&rx_ring->idr_lock);
4618                        msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4619                        spin_unlock_bh(&rx_ring->idr_lock);
4620                        if (!msdu) {
4621                                ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4622                                           "msdu_pop: invalid buf_id %d\n", buf_id);
4623                                break;
4624                        }
4625                        rxcb = ATH11K_SKB_RXCB(msdu);
4626                        if (!rxcb->unmapped) {
4627                                dma_unmap_single(ar->ab->dev, rxcb->paddr,
4628                                                 msdu->len +
4629                                                 skb_tailroom(msdu),
4630                                                 DMA_FROM_DEVICE);
4631                                rxcb->unmapped = 1;
4632                        }
4633                        if (drop_mpdu) {
4634                                ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4635                                           "i %d drop msdu %p *ppdu_id %x\n",
4636                                           i, msdu, *ppdu_id);
4637                                dev_kfree_skb_any(msdu);
4638                                msdu = NULL;
4639                                goto next_msdu;
4640                        }
4641
4642                        rx_desc = (struct hal_rx_desc *)msdu->data;
4643
4644                        rx_pkt_offset = sizeof(struct hal_rx_desc);
4645                        l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4646
4647                        if (is_first_msdu) {
4648                                if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4649                                        drop_mpdu = true;
4650                                        dev_kfree_skb_any(msdu);
4651                                        msdu = NULL;
4652                                        pmon->mon_last_linkdesc_paddr = paddr;
4653                                        goto next_msdu;
4654                                }
4655
4656                                msdu_ppdu_id =
4657                                        ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4658
4659                                if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4660                                                                 ppdu_id,
4661                                                                 &rx_bufs_used)) {
4662                                        if (rx_bufs_used) {
4663                                                drop_mpdu = true;
4664                                                dev_kfree_skb_any(msdu);
4665                                                msdu = NULL;
4666                                                goto next_msdu;
4667                                        }
4668                                        return rx_bufs_used;
4669                                }
4670                                pmon->mon_last_linkdesc_paddr = paddr;
4671                                is_first_msdu = false;
4672                        }
4673                        ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4674                                                  &is_frag, &total_len,
4675                                                  &frag_len, &msdu_cnt);
4676                        rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4677
4678                        ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
4679
4680                        if (!(*head_msdu))
4681                                *head_msdu = msdu;
4682                        else if (last)
4683                                last->next = msdu;
4684
4685                        last = msdu;
4686next_msdu:
4687                        pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4688                        rx_bufs_used++;
4689                        spin_lock_bh(&rx_ring->idr_lock);
4690                        idr_remove(&rx_ring->bufs_idr, buf_id);
4691                        spin_unlock_bh(&rx_ring->idr_lock);
4692                }
4693
4694                ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4695
4696                ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4697                                                    &sw_cookie, &rbm,
4698                                                    &p_buf_addr_info);
4699
4700                if (ar->ab->hw_params.rxdma1_enable) {
4701                        if (ath11k_dp_rx_monitor_link_desc_return(ar,
4702                                                                  p_last_buf_addr_info,
4703                                                                  dp->mac_id))
4704                                ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4705                                           "dp_rx_monitor_link_desc_return failed");
4706                } else {
4707                        ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4708                                                      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4709                }
4710
4711                p_last_buf_addr_info = p_buf_addr_info;
4712
4713        } while (paddr && msdu_cnt);
4714
4715        if (last)
4716                last->next = NULL;
4717
4718        *tail_msdu = msdu;
4719
4720        if (msdu_cnt == 0)
4721                *npackets = 1;
4722
4723        return rx_bufs_used;
4724}
4725
4726static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4727{
4728        u32 rx_pkt_offset, l2_hdr_offset;
4729
4730        rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4731        l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4732                                                      (struct hal_rx_desc *)msdu->data);
4733        skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4734}
4735
/* Prepare a monitor-mode MSDU chain (head_msdu..last_msdu) for delivery
 * to mac80211: strip the per-buffer RX descriptors, fill @rxs from the
 * first descriptor, and for native-wifi decap re-insert the 802.11
 * header (plus QoS control field, if any) in front of each MSDU.
 *
 * Returns the (possibly modified) head of the chain, or NULL if the
 * MPDU length is flagged bad or the decap format is unsupported.
 */
static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
			    u32 mac_id, struct sk_buff *head_msdu,
			    struct sk_buff *last_msdu,
			    struct ieee80211_rx_status *rxs)
{
	struct ath11k_base *ab = ar->ab;
	struct sk_buff *msdu, *mpdu_buf, *prev_buf;
	u32 wifi_hdr_len;
	struct hal_rx_desc *rx_desc;
	char *hdr_desc;
	u8 *dest, decap_format;
	struct ieee80211_hdr_3addr *wh;
	struct rx_attention *rx_attention;

	mpdu_buf = NULL;

	if (!head_msdu)
		goto err_merge_fail;

	/* The chain head still carries its HAL RX descriptor; use it for
	 * the MPDU-level checks and the PPDU status fill below.
	 */
	rx_desc = (struct hal_rx_desc *)head_msdu->data;
	rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);

	/* Drop the whole MPDU if the hardware flagged a bad MPDU length.
	 * Note: the caller frees the skbs on a NULL return.
	 */
	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
		return NULL;

	decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);

	/* Populate rate/band/signal info in @rxs from the descriptor. */
	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);

	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
		/* RAW decap: the 802.11 header is already in the payload;
		 * just strip the descriptors and trim the trailing FCS.
		 */
		ath11k_dp_rx_msdus_set_payload(ar, head_msdu);

		prev_buf = head_msdu;
		msdu = head_msdu->next;

		while (msdu) {
			ath11k_dp_rx_msdus_set_payload(ar, msdu);

			prev_buf = msdu;
			msdu = msdu->next;
		}

		prev_buf->next = NULL;

		/* FCS is only present on the last buffer of the MPDU. */
		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
		__le16 qos_field;
		u8 qos_pkt = 0;

		/* The 802.11 header is not in the payload; fetch it from
		 * the hdr_status area of the RX descriptor instead.
		 */
		rx_desc = (struct hal_rx_desc *)head_msdu->data;
		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);

		/* Base size */
		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
		wh = (struct ieee80211_hdr_3addr *)hdr_desc;

		if (ieee80211_is_data_qos(wh->frame_control)) {
			struct ieee80211_qos_hdr *qwh =
					(struct ieee80211_qos_hdr *)hdr_desc;

			qos_field = qwh->qos_ctrl;
			qos_pkt = 1;
		}
		msdu = head_msdu;

		while (msdu) {
			rx_desc = (struct hal_rx_desc *)msdu->data;
			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);

			if (qos_pkt) {
				/* NOTE(review): only sizeof(__le16) bytes are
				 * pushed, yet wifi_hdr_len + 2 bytes are
				 * written starting at dest. This overwrites
				 * part of the in-buffer RX descriptor, which
				 * ath11k_dp_rx_msdus_set_payload() then pulls
				 * past below — verify this header/payload
				 * layout interaction is intentional.
				 */
				dest = skb_push(msdu, sizeof(__le16));
				if (!dest)
					goto err_merge_fail;
				memcpy(dest, hdr_desc, wifi_hdr_len);
				memcpy(dest + wifi_hdr_len,
				       (u8 *)&qos_field, sizeof(__le16));
			}
			ath11k_dp_rx_msdus_set_payload(ar, msdu);
			prev_buf = msdu;
			msdu = msdu->next;
		}
		/* Append room for the FCS on the final buffer. */
		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
		if (!dest)
			goto err_merge_fail;

		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "mpdu_buf %pK mpdu_buf->len %u",
			   prev_buf, prev_buf->len);
	} else {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "decap format %d is not supported!\n",
			   decap_format);
		goto err_merge_fail;
	}

	return head_msdu;

err_merge_fail:
	/* NOTE(review): mpdu_buf is set to NULL above and never reassigned,
	 * so this branch is currently unreachable dead code; kept as-is
	 * pending confirmation it is not a leftover from an older merge
	 * implementation.
	 */
	if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "err_merge_fail mpdu_buf %pK", mpdu_buf);
		/* Free the head buffer */
		dev_kfree_skb_any(mpdu_buf);
	}
	return NULL;
}
4843
4844static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
4845                                    struct sk_buff *head_msdu,
4846                                    struct sk_buff *tail_msdu,
4847                                    struct napi_struct *napi)
4848{
4849        struct ath11k_pdev_dp *dp = &ar->dp;
4850        struct sk_buff *mon_skb, *skb_next, *header;
4851        struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
4852
4853        mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
4854                                              tail_msdu, rxs);
4855
4856        if (!mon_skb)
4857                goto mon_deliver_fail;
4858
4859        header = mon_skb;
4860
4861        rxs->flag = 0;
4862        do {
4863                skb_next = mon_skb->next;
4864                if (!skb_next)
4865                        rxs->flag &= ~RX_FLAG_AMSDU_MORE;
4866                else
4867                        rxs->flag |= RX_FLAG_AMSDU_MORE;
4868
4869                if (mon_skb == header) {
4870                        header = NULL;
4871                        rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
4872                } else {
4873                        rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
4874                }
4875                rxs->flag |= RX_FLAG_ONLY_MONITOR;
4876
4877                status = IEEE80211_SKB_RXCB(mon_skb);
4878                *status = *rxs;
4879
4880                ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
4881                mon_skb = skb_next;
4882        } while (mon_skb);
4883        rxs->flag = 0;
4884
4885        return 0;
4886
4887mon_deliver_fail:
4888        mon_skb = head_msdu;
4889        while (mon_skb) {
4890                skb_next = mon_skb->next;
4891                dev_kfree_skb_any(mon_skb);
4892                mon_skb = skb_next;
4893        }
4894        return -EINVAL;
4895}
4896
4897static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
4898                                          u32 quota, struct napi_struct *napi)
4899{
4900        struct ath11k_pdev_dp *dp = &ar->dp;
4901        struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4902        void *ring_entry;
4903        void *mon_dst_srng;
4904        u32 ppdu_id;
4905        u32 rx_bufs_used;
4906        u32 ring_id;
4907        struct ath11k_pdev_mon_stats *rx_mon_stats;
4908        u32      npackets = 0;
4909
4910        if (ar->ab->hw_params.rxdma1_enable)
4911                ring_id = dp->rxdma_mon_dst_ring.ring_id;
4912        else
4913                ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
4914
4915        mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
4916
4917        if (!mon_dst_srng) {
4918                ath11k_warn(ar->ab,
4919                            "HAL Monitor Destination Ring Init Failed -- %pK",
4920                            mon_dst_srng);
4921                return;
4922        }
4923
4924        spin_lock_bh(&pmon->mon_lock);
4925
4926        ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
4927
4928        ppdu_id = pmon->mon_ppdu_info.ppdu_id;
4929        rx_bufs_used = 0;
4930        rx_mon_stats = &pmon->rx_mon_stats;
4931
4932        while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
4933                struct sk_buff *head_msdu, *tail_msdu;
4934
4935                head_msdu = NULL;
4936                tail_msdu = NULL;
4937
4938                rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
4939                                                          &head_msdu,
4940                                                          &tail_msdu,
4941                                                          &npackets, &ppdu_id);
4942
4943                if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
4944                        pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4945                        ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4946                                   "dest_rx: new ppdu_id %x != status ppdu_id %x",
4947                                   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
4948                        break;
4949                }
4950                if (head_msdu && tail_msdu) {
4951                        ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
4952                                                 tail_msdu, napi);
4953                        rx_mon_stats->dest_mpdu_done++;
4954                }
4955
4956                ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
4957                                                                mon_dst_srng);
4958        }
4959        ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
4960
4961        spin_unlock_bh(&pmon->mon_lock);
4962
4963        if (rx_bufs_used) {
4964                rx_mon_stats->dest_ppdu_done++;
4965                if (ar->ab->hw_params.rxdma1_enable)
4966                        ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
4967                                                   &dp->rxdma_mon_buf_ring,
4968                                                   rx_bufs_used,
4969                                                   HAL_RX_BUF_RBM_SW3_BM);
4970                else
4971                        ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
4972                                                   &dp->rx_refill_buf_ring,
4973                                                   rx_bufs_used,
4974                                                   HAL_RX_BUF_RBM_SW3_BM);
4975        }
4976}
4977
4978static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
4979                                                int mac_id, u32 quota,
4980                                                struct napi_struct *napi)
4981{
4982        struct ath11k_pdev_dp *dp = &ar->dp;
4983        struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4984        struct hal_rx_mon_ppdu_info *ppdu_info;
4985        struct sk_buff *status_skb;
4986        u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
4987        struct ath11k_pdev_mon_stats *rx_mon_stats;
4988
4989        ppdu_info = &pmon->mon_ppdu_info;
4990        rx_mon_stats = &pmon->rx_mon_stats;
4991
4992        if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
4993                return;
4994
4995        while (!skb_queue_empty(&pmon->rx_status_q)) {
4996                status_skb = skb_dequeue(&pmon->rx_status_q);
4997
4998                tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
4999                                                            status_skb);
5000                if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
5001                        rx_mon_stats->status_ppdu_done++;
5002                        pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5003                        ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
5004                        pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5005                }
5006                dev_kfree_skb_any(status_skb);
5007        }
5008}
5009
5010static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
5011                                    struct napi_struct *napi, int budget)
5012{
5013        struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5014        struct ath11k_pdev_dp *dp = &ar->dp;
5015        struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5016        int num_buffs_reaped = 0;
5017
5018        num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
5019                                                             &pmon->rx_status_q);
5020        if (num_buffs_reaped)
5021                ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
5022
5023        return num_buffs_reaped;
5024}
5025
5026int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
5027                                   struct napi_struct *napi, int budget)
5028{
5029        struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5030        int ret = 0;
5031
5032        if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
5033                ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
5034        else
5035                ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
5036        return ret;
5037}
5038
5039static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
5040{
5041        struct ath11k_pdev_dp *dp = &ar->dp;
5042        struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5043
5044        skb_queue_head_init(&pmon->rx_status_q);
5045
5046        pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5047
5048        memset(&pmon->rx_mon_stats, 0,
5049               sizeof(pmon->rx_mon_stats));
5050        return 0;
5051}
5052
5053int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
5054{
5055        struct ath11k_pdev_dp *dp = &ar->dp;
5056        struct ath11k_mon_data *pmon = &dp->mon_data;
5057        struct hal_srng *mon_desc_srng = NULL;
5058        struct dp_srng *dp_srng;
5059        int ret = 0;
5060        u32 n_link_desc = 0;
5061
5062        ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
5063        if (ret) {
5064                ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
5065                return ret;
5066        }
5067
5068        /* if rxdma1_enable is false, no need to setup
5069         * rxdma_mon_desc_ring.
5070         */
5071        if (!ar->ab->hw_params.rxdma1_enable)
5072                return 0;
5073
5074        dp_srng = &dp->rxdma_mon_desc_ring;
5075        n_link_desc = dp_srng->size /
5076                ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
5077        mon_desc_srng =
5078                &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
5079
5080        ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
5081                                        HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
5082                                        n_link_desc);
5083        if (ret) {
5084                ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
5085                return ret;
5086        }
5087        pmon->mon_last_linkdesc_paddr = 0;
5088        pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
5089        spin_lock_init(&pmon->mon_lock);
5090
5091        return 0;
5092}
5093
5094static int ath11k_dp_mon_link_free(struct ath11k *ar)
5095{
5096        struct ath11k_pdev_dp *dp = &ar->dp;
5097        struct ath11k_mon_data *pmon = &dp->mon_data;
5098
5099        ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
5100                                    HAL_RXDMA_MONITOR_DESC,
5101                                    &dp->rxdma_mon_desc_ring);
5102        return 0;
5103}
5104
/* Detach per-pdev monitor RX state; counterpart of
 * ath11k_dp_rx_pdev_mon_attach(). Always returns 0.
 */
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);

	return 0;
}
5110
5111int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
5112{
5113        /* start reap timer */
5114        mod_timer(&ab->mon_reap_timer,
5115                  jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
5116
5117        return 0;
5118}
5119
5120int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
5121{
5122        int ret;
5123
5124        if (stop_timer)
5125                del_timer_sync(&ab->mon_reap_timer);
5126
5127        /* reap all the monitor related rings */
5128        ret = ath11k_dp_purge_mon_ring(ab);
5129        if (ret) {
5130                ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
5131                return ret;
5132        }
5133
5134        return 0;
5135}
5136