linux/drivers/net/wireless/ath/ath9k/recv.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

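/* The skb control block (cb) carries the owning ath_rxbuf across the RX
 * FIFO, so a completed skb can be matched back to its buffer. */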
#define SKB_CB_ATHBUF(__skb)    (*((struct ath_rxbuf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
        return sc->ps_enabled &&
               (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Set up and link descriptors.
 *
 * 11N: we can no longer afford to self-link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the
 * host buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
                            bool flush)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct sk_buff *skb;

        ds = bf->bf_desc;
        ds->ds_link = 0; /* link to null */
        ds->ds_data = bf->bf_buf_addr;

        /* virtual addr of the beginning of the buffer. */
        skb = bf->bf_mpdu;
        BUG_ON(skb == NULL);
        ds->ds_vdata = skb->data;

        /*
         * Set up the rx descriptor. The rx_bufsize here tells the
         * hardware how much data it can DMA to us and that we are
         * prepared to process.
         */
        ath9k_hw_setuprxdesc(ah, ds,
                             common->rx_bufsize,
                             0);

        if (sc->rx.rxlink)
                *sc->rx.rxlink = bf->bf_daddr;
        else if (!flush)
                ath9k_hw_putrxbuf(ah, bf->bf_daddr);

        sc->rx.rxlink = &ds->ds_link;
}

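/*
 * Defer relinking by one buffer: link the previously held buffer back
 * into the RX chain and hold on to the current one, so the hardware
 * never advances onto a descriptor that software is still processing.
 */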
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
                              bool flush)
{
        if (sc->rx.buf_hold)
                ath_rx_buf_link(sc, sc->rx.buf_hold, flush);

        sc->rx.buf_hold = bf;
}

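/* Switch the default rx antenna and reset the diversity counter. */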
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
        /* XXX block beacon interrupts */
        ath9k_hw_setantenna(sc->sc_ah, antenna);
        sc->rx.defant = antenna;
        sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);

        u32 rfilt, mfilt[2];

        /* configure rx filter */
        rfilt = ath_calcrxfilter(sc);
        ath9k_hw_setrxfilter(ah, rfilt);

        /* configure bssid mask */
        ath_hw_setbssidmask(common);

        /* configure operational mode */
        ath9k_hw_setopmode(ah);

        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;
        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

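/*
 * Hand one free buffer to the hardware: clear the status area first so
 * stale status words cannot be mistaken for a completed descriptor,
 * then push the buffer address into the given EDMA RX queue.
 */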
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;
        struct ath_rxbuf *bf;

        rx_edma = &sc->rx.rx_edma[qtype];
        if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
                return false;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
        list_del_init(&bf->list);

        skb = bf->bf_mpdu;

        memset(skb->data, 0, ah->caps.rx_status_len);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   ah->caps.rx_status_len, DMA_TO_DEVICE);

        SKB_CB_ATHBUF(skb) = bf;
        ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
        __skb_queue_tail(&rx_edma->rx_fifo, skb);

        return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
                                  enum ath9k_rx_qtype qtype)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_rxbuf *bf, *tbf;

        if (list_empty(&sc->rx.rxbuf)) {
                ath_dbg(common, QUEUE, "No free rx buf available\n");
                return;
        }

        list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
                if (!ath_rx_edma_buf_link(sc, qtype))
                        break;
}

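/* Drain the EDMA FIFO and put its buffers back onto the free list. */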
static void ath_rx_remove_buffer(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
{
        struct ath_rxbuf *bf;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;

        rx_edma = &sc->rx.rx_edma[qtype];

        while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
                bf = SKB_CB_ATHBUF(skb);
                BUG_ON(!bf);
                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_rxbuf *bf;

        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                if (bf->bf_mpdu) {
                        dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                         common->rx_bufsize,
                                         DMA_BIDIRECTIONAL);
                        dev_kfree_skb_any(bf->bf_mpdu);
                        bf->bf_buf_addr = 0;
                        bf->bf_mpdu = NULL;
                }
        }
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
        __skb_queue_head_init(&rx_edma->rx_fifo);
        rx_edma->rx_fifo_hwsize = size;
}

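/*
 * Allocate and DMA-map nbufs receive buffers and set up the low- and
 * high-priority EDMA queues. On any failure, everything allocated so
 * far is torn down again via ath_rx_edma_cleanup().
 */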
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ath_rxbuf *bf;
        int error = 0, i;
        u32 size;

        ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
                                    ah->caps.rx_status_len);

        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
                               ah->caps.rx_lp_qdepth);
        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
                               ah->caps.rx_hp_qdepth);

        size = sizeof(struct ath_rxbuf) * nbufs;
        bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
        if (!bf)
                return -ENOMEM;

        INIT_LIST_HEAD(&sc->rx.rxbuf);

        for (i = 0; i < nbufs; i++, bf++) {
                skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
                if (!skb) {
                        error = -ENOMEM;
                        goto rx_init_fail;
                }

                memset(skb->data, 0, common->rx_bufsize);
                bf->bf_mpdu = skb;

                bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                 common->rx_bufsize,
                                                 DMA_BIDIRECTIONAL);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(skb);
                        bf->bf_mpdu = NULL;
                        bf->bf_buf_addr = 0;
                        ath_err(common,
                                "dma_mapping_error() on RX init\n");
                        error = -ENOMEM;
                        goto rx_init_fail;
                }

                list_add_tail(&bf->list, &sc->rx.rxbuf);
        }

        return 0;

rx_init_fail:
        ath_rx_edma_cleanup(sc);
        return error;
}

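/*
 * Enable RX DMA, fill both EDMA queues with buffers, program the RX
 * filter/opmode and finally start the PCU receive engine.
 */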
static void ath_edma_start_recv(struct ath_softc *sc)
{
        ath9k_hw_rxena(sc->sc_ah);
        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
        ath_opmode_init(sc);
        ath9k_hw_startpcureceive(sc->sc_ah, sc->cur_chan->offchannel);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct sk_buff *skb;
        struct ath_rxbuf *bf;
        int error = 0;

        spin_lock_init(&sc->sc_pcu_lock);

        common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
                             sc->sc_ah->caps.rx_status_len;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                return ath_rx_edma_init(sc, nbufs);

        ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
                common->cachelsz, common->rx_bufsize);

        /* Initialize rx descriptors */

        error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
                                  "rx", nbufs, 1, 0);
        if (error != 0) {
                ath_err(common,
                        "failed to allocate rx descriptors: %d\n",
                        error);
                goto err;
        }

        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                skb = ath_rxbuf_alloc(common, common->rx_bufsize,
                                      GFP_KERNEL);
                if (skb == NULL) {
                        error = -ENOMEM;
                        goto err;
                }

                bf->bf_mpdu = skb;
                bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                 common->rx_bufsize,
                                                 DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(skb);
                        bf->bf_mpdu = NULL;
                        bf->bf_buf_addr = 0;
                        ath_err(common,
                                "dma_mapping_error() on RX init\n");
                        error = -ENOMEM;
                        goto err;
                }
        }
        sc->rx.rxlink = NULL;
err:
        if (error)
                ath_rx_cleanup(sc);

        return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_rxbuf *bf;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_rx_edma_cleanup(sc);
                return;
        }

        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                skb = bf->bf_mpdu;
                if (skb) {
                        dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                         common->rx_bufsize,
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                        bf->bf_buf_addr = 0;
                        bf->bf_mpdu = NULL;
                }
        }
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet,
 *   - when operating as a repeater so we see repeater-sta beacons, or
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u32 rfilt;

        if (IS_ENABLED(CONFIG_ATH9K_TX99))
                return 0;

        rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
                | ATH9K_RX_FILTER_MCAST;

        /* if operating on a DFS channel, enable radar pulse detection */
        if (sc->hw->conf.radar_enabled)
                rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

        spin_lock_bh(&sc->chan_lock);

        if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
                rfilt |= ATH9K_RX_FILTER_PROBEREQ;

        if (sc->sc_ah->is_monitoring)
                rfilt |= ATH9K_RX_FILTER_PROM;

        if ((sc->cur_chan->rxfilter & FIF_CONTROL) ||
            sc->sc_ah->dynack.enabled)
                rfilt |= ATH9K_RX_FILTER_CONTROL;

        if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
            (sc->cur_chan->nvifs <= 1) &&
            !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC))
                rfilt |= ATH9K_RX_FILTER_MYBEACON;
        else if (sc->sc_ah->opmode != NL80211_IFTYPE_OCB)
                rfilt |= ATH9K_RX_FILTER_BEACON;

        if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
            (sc->cur_chan->rxfilter & FIF_PSPOLL))
                rfilt |= ATH9K_RX_FILTER_PSPOLL;

        if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;

        if (sc->cur_chan->nvifs > 1 ||
            (sc->cur_chan->rxfilter & (FIF_OTHER_BSS | FIF_MCAST_ACTION))) {
                /* This is needed for older chips */
                if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
                rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
        }

        if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah) ||
            AR_SREV_9561(sc->sc_ah))
                rfilt |= ATH9K_RX_FILTER_4ADDRESS;

        if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
                rfilt |= ATH9K_RX_FILTER_CONTROL_WRAPPER;

        if (ath9k_is_chanctx_enabled() &&
            test_bit(ATH_OP_SCANNING, &common->op_flags))
                rfilt |= ATH9K_RX_FILTER_BEACON;

        spin_unlock_bh(&sc->chan_lock);

        return rfilt;
}

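/*
 * (Re)build the legacy RX descriptor chain and start reception. EDMA
 * chips take the ath_edma_start_recv() path instead.
 */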
void ath_startrecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_rxbuf *bf, *tbf;

        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_edma_start_recv(sc);
                return;
        }

        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        sc->rx.buf_hold = NULL;
        sc->rx.rxlink = NULL;
        list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
                ath_rx_buf_link(sc, bf, false);
        }

        /* We could have deleted elements so the list may be empty now */
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
        ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        ath9k_hw_rxena(ah);

start_recv:
        ath_opmode_init(sc);
        ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel);
}

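/*
 * Flush any frames still pending in the RX queues. On EDMA chips both
 * the high- and low-priority queues are drained; the second call
 * handles the LP queue there and is the only one needed on legacy
 * chips.
 */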
static void ath_flushrecv(struct ath_softc *sc)
{
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_rx_tasklet(sc, 1, true);
        ath_rx_tasklet(sc, 1, false);
}

bool ath_stoprecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        bool stopped, reset = false;

        ath9k_hw_abortpcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
        stopped = ath9k_hw_stopdmarecv(ah, &reset);

        ath_flushrecv(sc);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_edma_stop_recv(sc);
        else
                sc->rx.rxlink = NULL;

        if (!(ah->ah_flags & AH_UNPLUGGED) &&
            unlikely(!stopped)) {
                ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
                        "Failed to stop Rx DMA\n");
                RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
        }
        return stopped && !reset;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
        /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
        struct ieee80211_mgmt *mgmt;
        u8 *pos, *end, id, elen;
        struct ieee80211_tim_ie *tim;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        pos = mgmt->u.beacon.variable;
        end = skb->data + skb->len;

        while (pos + 2 < end) {
                id = *pos++;
                elen = *pos++;
                if (pos + elen > end)
                        break;

                if (id == WLAN_EID_TIM) {
                        if (elen < sizeof(*tim))
                                break;
                        tim = (struct ieee80211_tim_ie *) pos;
                        if (tim->dtim_count != 0)
                                break;
                        return tim->bitmap_ctrl & 0x01;
                }

                pos += elen;
        }

        return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        bool skip_beacon = false;

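        /* Shortest valid beacon: 24-byte 802.11 header + 8-byte timestamp +
         * 2-byte beacon interval + 2-byte capability info. */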
        if (skb->len < 24 + 8 + 2 + 2)
                return;

        sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

        if (sc->ps_flags & PS_BEACON_SYNC) {
                sc->ps_flags &= ~PS_BEACON_SYNC;
                ath_dbg(common, PS,
                        "Reconfigure beacon timers based on synchronized timestamp\n");

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
                if (ath9k_is_chanctx_enabled()) {
                        if (sc->cur_chan == &sc->offchannel.chan)
                                skip_beacon = true;
                }
#endif

                if (!skip_beacon &&
                    !(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0)))
                        ath9k_set_beacon(sc);

                ath9k_p2p_beacon_sync(sc);
        }

        if (ath_beacon_dtim_pending_cab(skb)) {
                /*
                 * Remain awake waiting for buffered broadcast/multicast
                 * frames. If the last broadcast/multicast frame is not
                 * received properly, the next beacon frame will work as
                 * a backup trigger for returning into NETWORK SLEEP state,
                 * so we are waiting for it as well.
                 */
                ath_dbg(common, PS,
                        "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
                sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
                return;
        }

        if (sc->ps_flags & PS_WAIT_FOR_CAB) {
                /*
                 * This can happen if a broadcast frame is dropped or the AP
                 * fails to send a frame indicating that all CAB frames have
                 * been delivered.
                 */
                sc->ps_flags &= ~PS_WAIT_FOR_CAB;
                ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
        }
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
        struct ieee80211_hdr *hdr;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Process Beacon and CAB receive in PS state */
        if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
            && mybeacon) {
                ath_rx_ps_beacon(sc, skb);
        } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
                   (ieee80211_is_data(hdr->frame_control) ||
                    ieee80211_is_action(hdr->frame_control)) &&
                   is_multicast_ether_addr(hdr->addr1) &&
                   !ieee80211_has_moredata(hdr->frame_control)) {
                /*
                 * No more broadcast/multicast frames to be received at this
                 * point.
                 */
                sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
                ath_dbg(common, PS,
                        "All PS CAB frames received, back to sleep\n");
        } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
                   !is_multicast_ether_addr(hdr->addr1) &&
                   !ieee80211_has_morefrags(hdr->frame_control)) {
                sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
                ath_dbg(common, PS,
                        "Going back to sleep after having received PS-Poll data (0x%lx)\n",
                        sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                        PS_WAIT_FOR_CAB |
                                        PS_WAIT_FOR_PSPOLL_DATA |
                                        PS_WAIT_FOR_TX_ACK));
        }
}

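/*
 * Look at the head of the RX FIFO: if the hardware is done with it,
 * pull the completed buffer out and return it in *dest. A corrupt
 * descriptor invalidates the following one as well, so both are
 * recycled and *dest is set to NULL for that iteration.
 */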
static bool ath_edma_get_buffers(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype,
                                 struct ath_rx_status *rs,
                                 struct ath_rxbuf **dest)
{
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ath_rxbuf *bf;
        int ret;

        skb = skb_peek(&rx_edma->rx_fifo);
        if (!skb)
                return false;

        bf = SKB_CB_ATHBUF(skb);
        BUG_ON(!bf);

        dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
                                common->rx_bufsize, DMA_FROM_DEVICE);

        ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
        if (ret == -EINPROGRESS) {
                /* let the device regain ownership of the buffer */
                dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                           common->rx_bufsize,
                                           DMA_FROM_DEVICE);
                return false;
        }

        __skb_unlink(skb, &rx_edma->rx_fifo);
        if (ret == -EINVAL) {
                /* corrupt descriptor, skip this one and the following one */
                list_add_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_edma_buf_link(sc, qtype);

                skb = skb_peek(&rx_edma->rx_fifo);
                if (skb) {
                        bf = SKB_CB_ATHBUF(skb);
                        BUG_ON(!bf);

                        __skb_unlink(skb, &rx_edma->rx_fifo);
                        list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
                }

                bf = NULL;
        }

        *dest = bf;
        return true;
}

static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
                                                  struct ath_rx_status *rs,
                                                  enum ath9k_rx_qtype qtype)
{
        struct ath_rxbuf *bf = NULL;

        while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
                if (!bf)
                        continue;

                return bf;
        }
        return NULL;
}

static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
                                             struct ath_rx_status *rs)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
        struct ath_rxbuf *bf;
        int ret;

        if (list_empty(&sc->rx.rxbuf)) {
                sc->rx.rxlink = NULL;
                return NULL;
        }

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
        if (bf == sc->rx.buf_hold)
                return NULL;

        ds = bf->bf_desc;

        /*
         * Must provide the virtual address of the current
         * descriptor, the physical address, and the virtual
         * address of the next descriptor in the h/w chain.
         * This allows the HAL to look ahead to see if the
         * hardware is done with a descriptor by checking the
         * done bit in the following descriptor and the address
         * of the current descriptor the DMA engine is working
         * on.  All this is necessary because of our use of
         * a self-linked list to avoid rx overruns.
         */
        ret = ath9k_hw_rxprocdesc(ah, ds, rs);
        if (ret == -EINPROGRESS) {
                struct ath_rx_status trs;
                struct ath_rxbuf *tbf;
                struct ath_desc *tds;

                memset(&trs, 0, sizeof(trs));
                if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
                        sc->rx.rxlink = NULL;
                        return NULL;
                }

                tbf = list_entry(bf->list.next, struct ath_rxbuf, list);

                /*
                 * On some hardware the descriptor status words could
                 * get corrupted, including the done bit. Because of
                 * this, check if the next descriptor's done bit is
                 * set or not.
                 *
                 * If the next descriptor's done bit is set, the current
                 * descriptor has been corrupted. Force s/w to discard
                 * this descriptor and continue...
                 */

                tds = tbf->bf_desc;
                ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
                if (ret == -EINPROGRESS)
                        return NULL;

                /*
                 * Re-check previous descriptor, in case it has been filled
                 * in the mean time.
                 */
                ret = ath9k_hw_rxprocdesc(ah, ds, rs);
                if (ret == -EINPROGRESS) {
                        /*
                         * mark descriptor as zero-length and set the 'more'
                         * flag to ensure that both buffers get discarded
                         */
                        rs->rs_datalen = 0;
                        rs->rs_more = true;
                }
        }

        list_del(&bf->list);
        if (!bf->bf_mpdu)
                return bf;

        /*
         * Synchronize the DMA transfer with CPU before
         * 1. accessing the frame
         * 2. requeueing the same buffer to h/w
         */
        dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
                                common->rx_bufsize,
                                DMA_FROM_DEVICE);

        return bf;
}

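/*
 * The hardware reports only the low 32 bits of the TSF at RX time, so
 * splice rs_tstamp into the upper half of the current 64-bit TSF. If
 * the two disagree by more than 2^28 us, the low word wrapped between
 * the frame's arrival and the TSF readout, so move mactime by one
 * 2^32 step.
 *
 * Example: tsf = 0x1_0000_0100 (tsf_lower = 0x100) and
 * rs_tstamp = 0xffff_ff00: the naive splice gives 0x1_ffff_ff00,
 * which lies in the future, so 2^32 is subtracted to get
 * 0x0_ffff_ff00.
 */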
static void ath9k_process_tsf(struct ath_rx_status *rs,
                              struct ieee80211_rx_status *rxs,
                              u64 tsf)
{
        u32 tsf_lower = tsf & 0xffffffff;

        rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
        if (rs->rs_tstamp > tsf_lower &&
            unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
                rxs->mactime -= 0x100000000ULL;

        if (rs->rs_tstamp < tsf_lower &&
            unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
                rxs->mactime += 0x100000000ULL;
}

/*
 * For Decrypt or Demic errors, we only mark the packet status here and
 * always push the frame up to let mac80211 handle the actual error case,
 * be it a missing decryption key or a real decryption error. This lets
 * us keep the statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
                                   struct sk_buff *skb,
                                   struct ath_rx_status *rx_stats,
                                   struct ieee80211_rx_status *rx_status,
                                   bool *decrypt_error, u64 tsf)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_hdr *hdr;
        bool discard_current = sc->rx.discard_next;
        bool is_phyerr;

        /*
         * Discard corrupt descriptors which are marked in
         * ath_get_next_rx_buf().
         */
        if (discard_current)
                goto corrupt;

        sc->rx.discard_next = false;

        /*
         * Discard zero-length packets and packets smaller than an ACK
         * which are not PHY_ERROR (short radar pulses have a length of 3)
         */
        is_phyerr = rx_stats->rs_status & ATH9K_RXERR_PHY;
        if (!rx_stats->rs_datalen ||
            (rx_stats->rs_datalen < 10 && !is_phyerr)) {
                RX_STAT_INC(sc, rx_len_err);
                goto corrupt;
        }

        /*
         * rs_status follows rs_datalen so if rs_datalen is too large
         * we can take a hint that hardware corrupted it, so ignore
         * those frames.
         */
        if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
                RX_STAT_INC(sc, rx_len_err);
                goto corrupt;
        }

        /* Only use status info from the last fragment */
        if (rx_stats->rs_more)
                return 0;

        /*
         * Return immediately if the RX descriptor has been marked
         * as corrupt based on the various error bits.
         *
         * This is different from the other corrupt descriptor
         * condition handled above.
         */
        if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
                goto corrupt;

        hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

        ath9k_process_tsf(rx_stats, rx_status, tsf);
        ath_debug_stat_rx(sc, rx_stats);

        /*
         * Process PHY errors and return so that the packet
         * can be dropped.
         */
        if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
                /*
                 * DFS and spectral are mutually exclusive
                 *
                 * Since some chips use PHYERR_RADAR as indication for both, we
                 * need to double check which feature is enabled to prevent
                 * feeding spectral or dfs-detector with wrong frames.
                 */
                if (hw->conf.radar_enabled) {
                        ath9k_dfs_process_phyerr(sc, hdr, rx_stats,
                                                 rx_status->mactime);
                } else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
                           ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
                                               rx_status->mactime)) {
                        RX_STAT_INC(sc, rx_spectral);
                }
                return -EINVAL;
        }

        /*
         * Everything but the rate is checked here; the rate check is done
         * separately to avoid doing two rate lookups for each frame.
         */
        spin_lock_bh(&sc->chan_lock);
        if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error,
                                 sc->cur_chan->rxfilter)) {
                spin_unlock_bh(&sc->chan_lock);
                return -EINVAL;
        }
        spin_unlock_bh(&sc->chan_lock);

        if (ath_is_mybeacon(common, hdr)) {
                RX_STAT_INC(sc, rx_beacons);
                rx_stats->is_mybeacon = true;
        }

        /*
         * This shouldn't happen, but have a safety check anyway.
         */
        if (WARN_ON(!ah->curchan))
                return -EINVAL;

        if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
                /*
                 * No valid hardware bitrate found -- we should not get here
                 * because hardware has already validated this frame as OK.
                 */
                ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
                        rx_stats->rs_rate);
                RX_STAT_INC(sc, rx_rate_err);
                return -EINVAL;
        }

        if (ath9k_is_chanctx_enabled()) {
                if (rx_stats->is_mybeacon)
                        ath_chanctx_beacon_recv_ev(sc,
                                           ATH_CHANCTX_EVENT_BEACON_RECEIVED);
        }

        ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);

        rx_status->band = ah->curchan->chan->band;
        rx_status->freq = ah->curchan->chan->center_freq;
        rx_status->antenna = rx_stats->rs_antenna;
        rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
        if (ieee80211_is_data_present(hdr->frame_control) &&
            !ieee80211_is_qos_nullfunc(hdr->frame_control))
                sc->rx.num_pkts++;
#endif

        return 0;

corrupt:
        sc->rx.discard_next = rx_stats->rs_more;
        return -EINVAL;
}

/*
 * Run the LNA combining algorithm only in these cases:
 *
 * Standalone WLAN cards with both LNA/Antenna diversity
 * enabled in the EEPROM.
 *
 * WLAN+BT cards which are in the supported card list
 * in ath_pci_id_table and the user has loaded the
 * driver with "bt_ant_diversity" set to true.
 */
static void ath9k_antenna_check(struct ath_softc *sc,
                                struct ath_rx_status *rs)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);

        if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
                return;

        /*
         * Change the default rx antenna if rx diversity
         * chooses the other antenna 3 times in a row.
         */
        if (sc->rx.defant != rs->rs_antenna) {
                if (++sc->rx.rxotherant >= 3)
                        ath_setdefantenna(sc, rs->rs_antenna);
        } else {
                sc->rx.rxotherant = 0;
        }

        if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
                if (common->bt_ant_diversity)
                        ath_ant_comb_scan(sc, rs);
        } else {
                ath_ant_comb_scan(sc, rs);
        }
}

 991
 992static void ath9k_apply_ampdu_details(struct ath_softc *sc,
 993        struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
 994{
 995        if (rs->rs_isaggr) {
 996                rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
 997
 998                rxs->ampdu_reference = sc->rx.ampdu_ref;
 999
1000                if (!rs->rs_moreaggr) {
1001                        rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
1002                        sc->rx.ampdu_ref++;
1003                }
1004
1005                if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
1006                        rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
1007        }
1008}
1009
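/*
 * Compute the estimated on-air duration of the received frame from its
 * rate and length and credit it to the sending station's airtime
 * accounting in mac80211.
 */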
static void ath_rx_count_airtime(struct ath_softc *sc,
                                 struct ath_rx_status *rs,
                                 struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_sta *sta;
        struct ieee80211_rx_status *rxs;
        const struct ieee80211_rate *rate;
        bool is_sgi, is_40, is_sp;
        int phy;
        u16 len = rs->rs_datalen;
        u32 airtime = 0;
        u8 tidno;

        if (!ieee80211_is_data(hdr->frame_control))
                return;

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
        if (!sta)
                goto exit;
        tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

        rxs = IEEE80211_SKB_RXCB(skb);

        is_sgi = !!(rxs->enc_flags & RX_ENC_FLAG_SHORT_GI);
        is_40 = !!(rxs->bw == RATE_INFO_BW_40);
        is_sp = !!(rxs->enc_flags & RX_ENC_FLAG_SHORTPRE);

        if (rxs->encoding == RX_ENC_HT) {
                /* MCS rates */
                airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
                                            is_40, is_sgi, is_sp);
        } else {
                phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM;
                rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx];
                airtime += ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100,
                                                  len, rxs->rate_idx, is_sp);
        }

        ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
exit:
        rcu_read_unlock();
}

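/*
 * Main RX processing loop: pull completed buffers from the hardware,
 * validate and post-process each frame, hand it to mac80211, and give
 * the hardware a fresh buffer in its place. At most ~512 frames are
 * handled per invocation to bound tasklet runtime.
 */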
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
        struct ath_rxbuf *bf;
        struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
        struct ieee80211_rx_status *rxs;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_hw *hw = sc->hw;
        int retval;
        struct ath_rx_status rs;
        enum ath9k_rx_qtype qtype;
        bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        int dma_type;
        u64 tsf = 0;
        unsigned long flags;
        dma_addr_t new_buf_addr;
        unsigned int budget = 512;
        struct ieee80211_hdr *hdr;

        if (edma)
                dma_type = DMA_BIDIRECTIONAL;
        else
                dma_type = DMA_FROM_DEVICE;

        qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

        tsf = ath9k_hw_gettsf64(ah);

        do {
                bool decrypt_error = false;

                memset(&rs, 0, sizeof(rs));
                if (edma)
                        bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
                else
                        bf = ath_get_next_rx_buf(sc, &rs);

                if (!bf)
                        break;

                skb = bf->bf_mpdu;
                if (!skb)
                        continue;

                /*
                 * Take frame header from the first fragment and RX status from
                 * the last one.
                 */
                if (sc->rx.frag)
                        hdr_skb = sc->rx.frag;
                else
                        hdr_skb = skb;

                rxs = IEEE80211_SKB_RXCB(hdr_skb);
                memset(rxs, 0, sizeof(struct ieee80211_rx_status));

                retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
                                                 &decrypt_error, tsf);
                if (retval)
                        goto requeue_drop_frag;

                /* Ensure we always have an skb to requeue once we are done
                 * processing the current buffer's skb */
                requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

                /* If there is no memory we ignore the current RX'd frame,
                 * tell hardware it can give us a new frame using the old
                 * skb and put it at the tail of the sc->rx.rxbuf list for
                 * processing. */
                if (!requeue_skb) {
                        RX_STAT_INC(sc, rx_oom_err);
                        goto requeue_drop_frag;
                }

                /* We will now give hardware our shiny new allocated skb */
                new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
                                              common->rx_bufsize, dma_type);
                if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
                        dev_kfree_skb_any(requeue_skb);
                        goto requeue_drop_frag;
                }

                /* Unmap the frame */
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                 common->rx_bufsize, dma_type);

                bf->bf_mpdu = requeue_skb;
                bf->bf_buf_addr = new_buf_addr;

                skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
                if (ah->caps.rx_status_len)
                        skb_pull(skb, ah->caps.rx_status_len);

                if (!rs.rs_more)
                        ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
                                                     rxs, decrypt_error);

                if (rs.rs_more) {
                        RX_STAT_INC(sc, rx_frags);
                        /*
                         * rs_more indicates chained descriptors which can be
                         * used to link buffers together for a sort of
                         * scatter-gather operation.
                         */
                        if (sc->rx.frag) {
                                /* too many fragments - cannot handle frame */
                                dev_kfree_skb_any(sc->rx.frag);
                                dev_kfree_skb_any(skb);
                                RX_STAT_INC(sc, rx_too_many_frags_err);
                                skb = NULL;
                        }
                        sc->rx.frag = skb;
                        goto requeue;
                }

                if (sc->rx.frag) {
                        int space = skb->len - skb_tailroom(hdr_skb);

                        if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
                                dev_kfree_skb(skb);
                                RX_STAT_INC(sc, rx_oom_err);
                                goto requeue_drop_frag;
                        }

                        sc->rx.frag = NULL;

                        skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
                                                  skb->len);
                        dev_kfree_skb_any(skb);
                        skb = hdr_skb;
                }

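                /* The 8-byte Michael MIC is still at the frame's tail here;
                 * trim it off, since rxs already carries
                 * RX_FLAG_MMIC_STRIPPED from postprocessing. */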
                if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
                        skb_trim(skb, skb->len - 8);

                spin_lock_irqsave(&sc->sc_pm_lock, flags);
                if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
                                     PS_WAIT_FOR_CAB |
                                     PS_WAIT_FOR_PSPOLL_DATA)) ||
                    ath9k_check_auto_sleep(sc))
                        ath_rx_ps(sc, skb, rs.is_mybeacon);
                spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

                ath9k_antenna_check(sc, &rs);
                ath9k_apply_ampdu_details(sc, &rs, rxs);
                ath_debug_rate_stats(sc, &rs, skb);
                ath_rx_count_airtime(sc, &rs, skb);

                hdr = (struct ieee80211_hdr *)skb->data;
                if (ieee80211_is_ack(hdr->frame_control))
                        ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp);

                ieee80211_rx(hw, skb);

requeue_drop_frag:
                if (sc->rx.frag) {
                        dev_kfree_skb_any(sc->rx.frag);
                        sc->rx.frag = NULL;
                }
requeue:
                list_add_tail(&bf->list, &sc->rx.rxbuf);

                if (!edma) {
                        ath_rx_buf_relink(sc, bf, flush);
                        if (!flush)
                                ath9k_hw_rxena(ah);
                } else if (!flush) {
                        ath_rx_edma_buf_link(sc, qtype);
                }

                if (!budget--)
                        break;
        } while (1);

        if (!(ah->imask & ATH9K_INT_RXEOL)) {
                ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
                ath9k_hw_set_interrupts(ah);
        }

        return 0;
}