linux/drivers/net/wireless/ath/ath6kl/txrx.c
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"
#include "trace.h"

/*
 * tid - bits 0..3 of tid_mux
 * aid - bits 4..7 of tid_mux
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4
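/*
 * Example (illustrative): a tid_mux value of 0x53 decodes to
 * aid = 0x53 >> 4 = 5 and tid = 0x53 & 0xf = 3.
 */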

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
        return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
        return tid_mux >> ATH6KL_AID_SHIFT;
}

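/*
 * Map an outgoing unicast frame to the HTC endpoint serving its
 * destination when IBSS power save is active.  Multicast frames always
 * use ENDPOINT_2.  Unicast destinations are tracked in ar->node_map;
 * when none of ENDPOINT_2..ENDPOINT_5 is free, endpoints are handed out
 * round-robin via ar->next_ep_id.  On return, *map_no holds the 1-based
 * node map slot (0 for multicast).
 */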
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
                               u32 *map_no)
{
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ethhdr *eth_hdr;
        u32 i, ep_map = -1;
        u8 *datap;

        *map_no = 0;
        datap = skb->data;
        eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

        if (is_multicast_ether_addr(eth_hdr->h_dest))
                return ENDPOINT_2;

        for (i = 0; i < ar->node_num; i++) {
                if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
                           ETH_ALEN) == 0) {
                        *map_no = i + 1;
                        ar->node_map[i].tx_pend++;
                        return ar->node_map[i].ep_id;
                }

                if ((ep_map == -1) && !ar->node_map[i].tx_pend)
                        ep_map = i;
        }

        if (ep_map == -1) {
                ep_map = ar->node_num;
                ar->node_num++;
                if (ar->node_num > MAX_NODE_NUM)
                        return ENDPOINT_UNUSED;
        }

        memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

        for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
                if (!ar->tx_pending[i]) {
                        ar->node_map[ep_map].ep_id = i;
                        break;
                }

                /*
                 * No free endpoint is available, start redistribution on
                 * the in-use endpoints.
                 */
                if (i == ENDPOINT_5) {
                        ar->node_map[ep_map].ep_id = ar->next_ep_id;
                        ar->next_ep_id++;
                        if (ar->next_ep_id > ENDPOINT_5)
                                ar->next_ep_id = ENDPOINT_2;
                }
        }

        *map_no = ep_map + 1;
        ar->node_map[ep_map].tx_pend++;

        return ar->node_map[ep_map].ep_id;
}

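/*
 * Handle a frame for a station that is in uAPSD power save.  The low
 * bits of conn->apsd_info carry one delivery-enabled flag per WMM
 * traffic class (tested below with 1 << traffic_class); frames for
 * delivery-enabled classes are parked on conn->apsdq until the station
 * sends a trigger.  Returns true if the frame was consumed (queued).
 */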
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
                                struct ath6kl_vif *vif,
                                struct sk_buff *skb,
                                u32 *flags)
{
        struct ath6kl *ar = vif->ar;
        bool is_apsdq_empty = false;
        struct ethhdr *datap = (struct ethhdr *) skb->data;
        u8 up = 0, traffic_class, *ip_hdr;
        u16 ether_type;
        struct ath6kl_llc_snap_hdr *llc_hdr;

        if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
                /*
                 * This tx is because of a uAPSD trigger; determine
                 * the More and EOSP bits. Set EOSP if the queue is empty
                 * or sufficient frames have been delivered for this trigger.
                 */
                spin_lock_bh(&conn->psq_lock);
                if (!skb_queue_empty(&conn->apsdq))
                        *flags |= WMI_DATA_HDR_FLAGS_MORE;
                else if (conn->sta_flags & STA_PS_APSD_EOSP)
                        *flags |= WMI_DATA_HDR_FLAGS_EOSP;
                *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
                spin_unlock_bh(&conn->psq_lock);
                return false;
        } else if (!conn->apsd_info) {
                return false;
        }

        if (test_bit(WMM_ENABLED, &vif->flags)) {
                ether_type = be16_to_cpu(datap->h_proto);
                if (is_ethertype(ether_type)) {
                        /* packet is in DIX format */
                        ip_hdr = (u8 *)(datap + 1);
                } else {
                        /* packet is in 802.3 format */
                        llc_hdr = (struct ath6kl_llc_snap_hdr *)
                                                        (datap + 1);
                        ether_type = be16_to_cpu(llc_hdr->eth_type);
                        ip_hdr = (u8 *)(llc_hdr + 1);
                }

                if (ether_type == IP_ETHERTYPE)
                        up = ath6kl_wmi_determine_user_priority(
                                                        ip_hdr, 0);
        }

        traffic_class = ath6kl_wmi_get_traffic_class(up);

        if ((conn->apsd_info & (1 << traffic_class)) == 0)
                return false;

        /* Queue the frames if the STA is sleeping */
        spin_lock_bh(&conn->psq_lock);
        is_apsdq_empty = skb_queue_empty(&conn->apsdq);
        skb_queue_tail(&conn->apsdq, skb);
        spin_unlock_bh(&conn->psq_lock);

        /*
         * If this is the first pkt getting queued
         * for this STA, update the PVB for this STA
         */
        if (is_apsdq_empty) {
                ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
                                              vif->fw_vif_idx,
                                              conn->aid, 1, 0);
        }
        *flags |= WMI_DATA_HDR_FLAGS_UAPSD;

        return true;
}

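/*
 * Legacy (PS-Poll based) power-save path: buffer the frame on conn->psq
 * and, on the first queued frame, set this station's bit in the TIM
 * partial virtual bitmap (PVB) so beacons advertise buffered traffic.
 */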
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
                                struct ath6kl_vif *vif,
                                struct sk_buff *skb,
                                u32 *flags)
{
        bool is_psq_empty = false;
        struct ath6kl *ar = vif->ar;

        if (conn->sta_flags & STA_PS_POLLED) {
                spin_lock_bh(&conn->psq_lock);
                if (!skb_queue_empty(&conn->psq))
                        *flags |= WMI_DATA_HDR_FLAGS_MORE;
                spin_unlock_bh(&conn->psq_lock);
                return false;
        }

        /* Queue the frames if the STA is sleeping */
        spin_lock_bh(&conn->psq_lock);
        is_psq_empty = skb_queue_empty(&conn->psq);
        skb_queue_tail(&conn->psq, skb);
        spin_unlock_bh(&conn->psq_lock);

        /*
         * If this is the first pkt getting queued
         * for this STA, update the PVB for this
         * STA.
         */
        if (is_psq_empty)
                ath6kl_wmi_set_pvb_cmd(ar->wmi,
                                       vif->fw_vif_idx,
                                       conn->aid, 1);
        return true;
}

static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
                                u32 *flags)
{
        struct ethhdr *datap = (struct ethhdr *) skb->data;
        struct ath6kl_sta *conn = NULL;
        bool ps_queued = false;
        struct ath6kl *ar = vif->ar;

        if (is_multicast_ether_addr(datap->h_dest)) {
                u8 ctr = 0;
                bool q_mcast = false;

                for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
                        if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
                                q_mcast = true;
                                break;
                        }
                }

                if (q_mcast) {
                        /*
                         * If this transmit is not because of a DTIM expiry,
                         * queue it.
                         */
                        if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
                                bool is_mcastq_empty = false;

                                spin_lock_bh(&ar->mcastpsq_lock);
                                is_mcastq_empty =
                                        skb_queue_empty(&ar->mcastpsq);
                                skb_queue_tail(&ar->mcastpsq, skb);
                                spin_unlock_bh(&ar->mcastpsq_lock);

                                /*
                                 * If this is the first Mcast pkt getting
                                 * queued, indicate to the target to set the
                                 * BitmapControl LSB of the TIM IE.
                                 */
                                if (is_mcastq_empty)
                                        ath6kl_wmi_set_pvb_cmd(ar->wmi,
                                                               vif->fw_vif_idx,
                                                               MCAST_AID, 1);

                                ps_queued = true;
                        } else {
                                /*
                                 * This transmit is because of Dtim expiry.
                                 * Determine if MoreData bit has to be set.
                                 */
                                spin_lock_bh(&ar->mcastpsq_lock);
                                if (!skb_queue_empty(&ar->mcastpsq))
                                        *flags |= WMI_DATA_HDR_FLAGS_MORE;
                                spin_unlock_bh(&ar->mcastpsq_lock);
                        }
                }
        } else {
                conn = ath6kl_find_sta(vif, datap->h_dest);
                if (!conn) {
                        dev_kfree_skb(skb);

                        /* Inform the caller that the skb is consumed */
                        return true;
                }

                if (conn->sta_flags & STA_PS_SLEEP) {
                        ps_queued = ath6kl_process_uapsdq(conn,
                                                vif, skb, flags);
                        if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
                                ps_queued = ath6kl_process_psq(conn,
                                                vif, skb, flags);
                }
        }
        return ps_queued;
}

/* Tx functions */

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
                      enum htc_endpoint_id eid)
{
        struct ath6kl *ar = devt;
        int status = 0;
        struct ath6kl_cookie *cookie = NULL;

        trace_ath6kl_wmi_cmd(skb->data, skb->len);

        if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
                dev_kfree_skb(skb);
                return -EACCES;
        }

        if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
                         eid >= ENDPOINT_MAX)) {
                status = -EINVAL;
                goto fail_ctrl_tx;
        }

        spin_lock_bh(&ar->lock);

        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
                   skb, skb->len, eid);

        if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
                /*
                 * Control endpoint is full, don't allocate resources, we
                 * are just going to drop this packet.
                 */
                cookie = NULL;
                ath6kl_err("wmi ctrl ep full, dropping pkt: 0x%p, len:%d\n",
                           skb, skb->len);
        } else {
                cookie = ath6kl_alloc_cookie(ar);
        }

        if (cookie == NULL) {
                spin_unlock_bh(&ar->lock);
                status = -ENOMEM;
                goto fail_ctrl_tx;
        }

        ar->tx_pending[eid]++;

        if (eid != ar->ctrl_ep)
                ar->total_tx_data_pend++;

        spin_unlock_bh(&ar->lock);

        cookie->skb = skb;
        cookie->map_no = 0;
        set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
                         eid, ATH6KL_CONTROL_PKT_TAG);
        cookie->htc_pkt.skb = skb;

        /*
         * This interface is asynchronous, if there is an error, cleanup
         * will happen in the TX completion callback.
         */
        ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

        return 0;

fail_ctrl_tx:
        dev_kfree_skb(skb);
        return status;
}

int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ath6kl_cookie *cookie = NULL;
        enum htc_endpoint_id eid = ENDPOINT_UNUSED;
        struct ath6kl_vif *vif = netdev_priv(dev);
        u32 map_no = 0;
        u16 htc_tag = ATH6KL_DATA_PKT_TAG;
        u8 ac = 99; /* initialize to unmapped ac */
        bool chk_adhoc_ps_mapping = false;
        int ret;
        struct wmi_tx_meta_v2 meta_v2;
        void *meta;
        u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
        u8 meta_ver = 0;
        u32 flags = 0;

        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
                   skb, skb->data, skb->len);

        /* If target is not associated */
        if (!test_bit(CONNECTED, &vif->flags))
                goto fail_tx;

        if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
                goto fail_tx;

        if (!test_bit(WMI_READY, &ar->flag))
                goto fail_tx;

        /* AP mode Power saving processing */
        if (vif->nw_type == AP_NETWORK) {
                if (ath6kl_powersave_ap(vif, skb, &flags))
                        return 0;
        }

        if (test_bit(WMI_ENABLED, &ar->flag)) {
                if ((dev->features & NETIF_F_IP_CSUM) &&
                    (csum == CHECKSUM_PARTIAL)) {
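                        /*
                         * Re-base csum_start from skb->head onto the network
                         * header, and allow for the LLC/SNAP header that the
                         * DIX -> 802.3 conversion below will insert.
                         */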
                        csum_start = skb->csum_start -
                                        (skb_network_header(skb) - skb->head) +
                                        sizeof(struct ath6kl_llc_snap_hdr);
                        csum_dest = skb->csum_offset + csum_start;
                }

                if (skb_headroom(skb) < dev->needed_headroom) {
                        struct sk_buff *tmp_skb = skb;

                        skb = skb_realloc_headroom(skb, dev->needed_headroom);
                        kfree_skb(tmp_skb);
                        if (skb == NULL) {
                                vif->net_stats.tx_dropped++;
                                return 0;
                        }
                }

                if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
                        ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
                        goto fail_tx;
                }

                if ((dev->features & NETIF_F_IP_CSUM) &&
                    (csum == CHECKSUM_PARTIAL)) {
                        meta_v2.csum_start = csum_start;
                        meta_v2.csum_dest = csum_dest;

                        /* instruct target to calculate checksum */
                        meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
                        meta_ver = WMI_META_VERSION_2;
                        meta = &meta_v2;
                } else {
                        meta_ver = 0;
                        meta = NULL;
                }

                ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
                                DATA_MSGTYPE, flags, 0,
                                meta_ver,
                                meta, vif->fw_vif_idx);

                if (ret) {
                        ath6kl_warn("failed to add wmi data header:%d\n", ret);
                        goto fail_tx;
                }

                if ((vif->nw_type == ADHOC_NETWORK) &&
                    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
                        chk_adhoc_ps_mapping = true;
                else {
                        /* get the stream mapping */
                        ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
                                    vif->fw_vif_idx, skb,
                                    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
                        if (ret)
                                goto fail_tx;
                }
        } else {
                goto fail_tx;
        }

        spin_lock_bh(&ar->lock);

        if (chk_adhoc_ps_mapping)
                eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
        else
                eid = ar->ac2ep_map[ac];

        if (eid == 0 || eid == ENDPOINT_UNUSED) {
                ath6kl_err("eid %d is not mapped!\n", eid);
                spin_unlock_bh(&ar->lock);
                goto fail_tx;
        }

        /* allocate resource for this packet */
        cookie = ath6kl_alloc_cookie(ar);

        if (!cookie) {
                spin_unlock_bh(&ar->lock);
                goto fail_tx;
        }

        /* update counts while the lock is held */
        ar->tx_pending[eid]++;
        ar->total_tx_data_pend++;

        spin_unlock_bh(&ar->lock);

        if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
            skb_cloned(skb)) {
                /*
                 * We will touch (move) the buffer data to align it. Since the
                 * skb buffer is cloned and not only the header is changed, we
                 * have to copy it to allow the changes. Since we are copying
                 * the data here, we may as well align it by reserving suitable
                 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
                 */
                struct sk_buff *nskb;

                nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
                if (nskb == NULL)
                        goto fail_tx;
                kfree_skb(skb);
                skb = nskb;
        }

        cookie->skb = skb;
        cookie->map_no = map_no;
        set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
                         eid, htc_tag);
        cookie->htc_pkt.skb = skb;

        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
                        skb->data, skb->len);

        /*
         * HTC interface is asynchronous, if this fails, cleanup will
         * happen in the ath6kl_tx_complete callback.
         */
        ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

        return 0;

fail_tx:
        dev_kfree_skb(skb);

        vif->net_stats.tx_dropped++;
        vif->net_stats.tx_aborted_errors++;

        return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
        struct ath6kl *ar = devt;
        enum htc_endpoint_id eid;
        int i;

        eid = ar->ac2ep_map[traffic_class];

        if (!test_bit(WMI_ENABLED, &ar->flag))
                goto notify_htc;

        spin_lock_bh(&ar->lock);

        ar->ac_stream_active[traffic_class] = active;

        if (active) {
                /*
                 * Keep track of the active stream with the highest
                 * priority.
                 */
                if (ar->ac_stream_pri_map[traffic_class] >
                    ar->hiac_stream_active_pri)
                        /* set the new highest active priority */
                        ar->hiac_stream_active_pri =
                                        ar->ac_stream_pri_map[traffic_class];

        } else {
                /*
                 * We may have to search for the next active stream
                 * that is the highest priority.
                 */
                if (ar->hiac_stream_active_pri ==
                        ar->ac_stream_pri_map[traffic_class]) {
                        /*
                         * The highest priority stream just went inactive;
                         * reset and search for the next highest active
                         * priority stream.
                         */
                        ar->hiac_stream_active_pri = 0;

                        for (i = 0; i < WMM_NUM_AC; i++) {
                                if (ar->ac_stream_active[i] &&
                                    (ar->ac_stream_pri_map[i] >
                                     ar->hiac_stream_active_pri))
                                        /*
                                         * Set the new highest active
                                         * priority.
                                         */
                                        ar->hiac_stream_active_pri =
                                                ar->ac_stream_pri_map[i];
                        }
                }
        }

        spin_unlock_bh(&ar->lock);

notify_htc:
        /* notify HTC, this may cause credit distribution changes */
        ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
                                               struct htc_packet *packet)
{
        struct ath6kl *ar = target->dev->ar;
        struct ath6kl_vif *vif;
        enum htc_endpoint_id endpoint = packet->endpoint;
        enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

        if (endpoint == ar->ctrl_ep) {
                /*
                 * Under normal WMI, if this is getting full then something
                 * is running rampant; the host should not be exhausting the
                 * WMI queue with too many commands. The only exception to
                 * this is during testing using endpointping.
                 */
                set_bit(WMI_CTRL_EP_FULL, &ar->flag);
                ath6kl_err("wmi ctrl ep is full\n");
                ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
                return action;
        }

        if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
                return action;

        /*
         * The last MAX_HI_COOKIE_NUM "batch" of cookies is reserved for
         * the highest active stream.
         */
        if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
            ar->hiac_stream_active_pri &&
            ar->cookie_count <=
                        target->endpoint[endpoint].tx_drop_packet_threshold)
                /*
                 * Give preference to the highest priority stream by
                 * dropping the packets which overflowed.
                 */
                action = HTC_SEND_FULL_DROP;

        /* FIXME: Locking */
        spin_lock_bh(&ar->list_lock);
        list_for_each_entry(vif, &ar->vif_list, list) {
                if (vif->nw_type == ADHOC_NETWORK ||
                    action != HTC_SEND_FULL_DROP) {
                        spin_unlock_bh(&ar->list_lock);

                        set_bit(NETQ_STOPPED, &vif->flags);
                        netif_stop_queue(vif->ndev);

                        return action;
                }
        }
        spin_unlock_bh(&ar->list_lock);

        return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
                                     enum htc_endpoint_id eid, u32 map_no)
{
        struct ath6kl *ar = vif->ar;
        u32 i;

        if (vif->nw_type != ADHOC_NETWORK)
                return;

        if (!ar->ibss_ps_enable)
                return;

        if (eid == ar->ctrl_ep)
                return;

        if (map_no == 0)
                return;

        map_no--;
        ar->node_map[map_no].tx_pend--;

        if (ar->node_map[map_no].tx_pend)
                return;

        if (map_no != (ar->node_num - 1))
                return;

        for (i = ar->node_num; i > 0; i--) {
                if (ar->node_map[i - 1].tx_pend)
                        break;

                memset(&ar->node_map[i - 1], 0,
                       sizeof(struct ath6kl_node_mapping));
                ar->node_num--;
        }
}

void ath6kl_tx_complete(struct htc_target *target,
                        struct list_head *packet_queue)
{
        struct ath6kl *ar = target->dev->ar;
        struct sk_buff_head skb_queue;
        struct htc_packet *packet;
        struct sk_buff *skb;
        struct ath6kl_cookie *ath6kl_cookie;
        u32 map_no = 0;
        int status;
        enum htc_endpoint_id eid;
        bool wake_event = false;
        bool flushing[ATH6KL_VIF_MAX] = {false};
        u8 if_idx;
        struct ath6kl_vif *vif;

        skb_queue_head_init(&skb_queue);

        /* lock the driver as we update internal state */
        spin_lock_bh(&ar->lock);

        /* reap completed packets */
        while (!list_empty(packet_queue)) {
                packet = list_first_entry(packet_queue, struct htc_packet,
                                          list);
                list_del(&packet->list);

                if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
                                 packet->endpoint >= ENDPOINT_MAX))
                        continue;

                ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
                if (WARN_ON_ONCE(!ath6kl_cookie))
                        continue;

                status = packet->status;
                skb = ath6kl_cookie->skb;
                eid = packet->endpoint;
                map_no = ath6kl_cookie->map_no;

                if (WARN_ON_ONCE(!skb || !skb->data)) {
                        dev_kfree_skb(skb);
                        ath6kl_free_cookie(ar, ath6kl_cookie);
                        continue;
                }

                __skb_queue_tail(&skb_queue, skb);

                if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
                        ath6kl_free_cookie(ar, ath6kl_cookie);
                        continue;
                }

                ar->tx_pending[eid]--;

                if (eid != ar->ctrl_ep)
                        ar->total_tx_data_pend--;

                if (eid == ar->ctrl_ep) {
                        if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
                                clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

                        if (ar->tx_pending[eid] == 0)
                                wake_event = true;
                }

                if (eid == ar->ctrl_ep) {
                        if_idx = wmi_cmd_hdr_get_if_idx(
                                (struct wmi_cmd_hdr *) packet->buf);
                } else {
                        if_idx = wmi_data_hdr_get_if_idx(
                                (struct wmi_data_hdr *) packet->buf);
                }

                vif = ath6kl_get_vif_by_index(ar, if_idx);
                if (!vif) {
                        ath6kl_free_cookie(ar, ath6kl_cookie);
                        continue;
                }

                if (status) {
                        if (status == -ECANCELED)
                                /* a packet was flushed */
                                flushing[if_idx] = true;

                        vif->net_stats.tx_errors++;

                        if (status != -ENOSPC && status != -ECANCELED)
                                ath6kl_warn("tx complete error: %d\n", status);

                        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                                   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
                                   __func__, skb, packet->buf, packet->act_len,
                                   eid, "error!");
                } else {
                        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                                   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
                                   __func__, skb, packet->buf, packet->act_len,
                                   eid, "OK");

                        flushing[if_idx] = false;
                        vif->net_stats.tx_packets++;
                        vif->net_stats.tx_bytes += skb->len;
                }

                ath6kl_tx_clear_node_map(vif, eid, map_no);

                ath6kl_free_cookie(ar, ath6kl_cookie);

                if (test_bit(NETQ_STOPPED, &vif->flags))
                        clear_bit(NETQ_STOPPED, &vif->flags);
        }

        spin_unlock_bh(&ar->lock);

        __skb_queue_purge(&skb_queue);

        /* FIXME: Locking */
        spin_lock_bh(&ar->list_lock);
        list_for_each_entry(vif, &ar->vif_list, list) {
                if (test_bit(CONNECTED, &vif->flags) &&
                    !flushing[vif->fw_vif_idx]) {
                        spin_unlock_bh(&ar->list_lock);
                        netif_wake_queue(vif->ndev);
                        spin_lock_bh(&ar->list_lock);
                }
        }
        spin_unlock_bh(&ar->list_lock);

        if (wake_event)
                wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
        int i;

        /* flush all the data (non-control) streams */
        for (i = 0; i < WMM_NUM_AC; i++)
                ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
                                      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
                                              struct sk_buff *skb)
{
        if (!skb)
                return;

        skb->dev = dev;

        if (!(skb->dev->flags & IFF_UP)) {
                dev_kfree_skb(skb);
                return;
        }

        skb->protocol = eth_type_trans(skb, skb->dev);

        netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
        struct sk_buff *skb;

        while (num) {
                skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
                if (!skb) {
                        ath6kl_err("netbuf allocation failed\n");
                        return;
                }
                skb_queue_tail(q, skb);
                num--;
        }
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
        struct sk_buff *skb = NULL;

        if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
            (AGGR_NUM_OF_FREE_NETBUFS >> 2))
                ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
                                     AGGR_NUM_OF_FREE_NETBUFS);

        skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

        return skb;
}

void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
        struct ath6kl *ar = target->dev->ar;
        struct sk_buff *skb;
        int rx_buf;
        int n_buf_refill;
        struct htc_packet *packet;
        struct list_head queue;

        n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
                          ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

        if (n_buf_refill <= 0)
                return;

        INIT_LIST_HEAD(&queue);

        ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
                   "%s: providing htc with %d buffers at eid=%d\n",
                   __func__, n_buf_refill, endpoint);

        for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
                skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
                if (!skb)
                        break;

                packet = (struct htc_packet *) skb->head;
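                /*
                 * HTC needs the rx payload 4-byte aligned: move skb->data
                 * back to an aligned address and rebuild the tail pointer
                 * from the saved length.
                 */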
                if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
                        size_t len = skb_headlen(skb);
                        skb->data = PTR_ALIGN(skb->data - 4, 4);
                        skb_set_tail_pointer(skb, len);
                }
                set_htc_rxpkt_info(packet, skb, skb->data,
                                   ATH6KL_BUFFER_SIZE, endpoint);
                packet->skb = skb;
                list_add_tail(&packet->list, &queue);
        }

        if (!list_empty(&queue))
                ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
        struct htc_packet *packet;
        struct sk_buff *skb;

        while (count) {
                skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
                if (!skb)
                        return;

                packet = (struct htc_packet *) skb->head;
                if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
                        size_t len = skb_headlen(skb);
                        skb->data = PTR_ALIGN(skb->data - 4, 4);
                        skb_set_tail_pointer(skb, len);
                }
                set_htc_rxpkt_info(packet, skb, skb->data,
                                   ATH6KL_AMSDU_BUFFER_SIZE, 0);
                packet->skb = skb;

                spin_lock_bh(&ar->lock);
                list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
                spin_unlock_bh(&ar->lock);
                count--;
        }
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
                                            enum htc_endpoint_id endpoint,
                                            int len)
{
        struct ath6kl *ar = target->dev->ar;
        struct htc_packet *packet = NULL;
        struct list_head *pkt_pos;
        int refill_cnt = 0, depth = 0;

        ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
                   __func__, endpoint, len);

        if ((len <= ATH6KL_BUFFER_SIZE) ||
            (len > ATH6KL_AMSDU_BUFFER_SIZE))
                return NULL;

        spin_lock_bh(&ar->lock);

        if (list_empty(&ar->amsdu_rx_buffer_queue)) {
                spin_unlock_bh(&ar->lock);
                refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
                goto refill_buf;
        }

        packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
                                  struct htc_packet, list);
        list_del(&packet->list);
        list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
                depth++;

        refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
        spin_unlock_bh(&ar->lock);

        /* set actual endpoint ID */
        packet->endpoint = endpoint;

refill_buf:
        if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
                ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

        return packet;
}

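/*
 * Split a received A-MSDU into its component 802.3 subframes, convert
 * each one to DIX and queue it on rxtid->q for delivery.  Subframes are
 * padded to a 4-byte boundary; e.g. a 77-byte subframe is followed by
 * 3 bytes of padding before the next one starts.
 */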
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
                             struct rxtid *rxtid, struct sk_buff *skb)
{
        struct sk_buff *new_skb;
        struct ethhdr *hdr;
        u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
        u8 *framep;

        mac_hdr_len = sizeof(struct ethhdr);
        framep = skb->data + mac_hdr_len;
        amsdu_len = skb->len - mac_hdr_len;

        while (amsdu_len > mac_hdr_len) {
                hdr = (struct ethhdr *) framep;
                payload_8023_len = ntohs(hdr->h_proto);

                if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
                    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
                        ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
                                   payload_8023_len);
                        break;
                }

                frame_8023_len = payload_8023_len + mac_hdr_len;
                new_skb = aggr_get_free_skb(p_aggr);
                if (!new_skb) {
                        ath6kl_err("no buffer available\n");
                        break;
                }

                memcpy(new_skb->data, framep, frame_8023_len);
                skb_put(new_skb, frame_8023_len);
                if (ath6kl_wmi_dot3_2_dix(new_skb)) {
                        ath6kl_err("dot3_2_dix error\n");
                        dev_kfree_skb(new_skb);
                        break;
                }

                skb_queue_tail(&rxtid->q, new_skb);

                /* Is this the last subframe within this aggregate? */
                if ((amsdu_len - frame_8023_len) == 0)
                        break;

                /* Account for the A-MSDU subframe padding bytes -
                 * round the subframe length up to the next 4-byte boundary.
                 */
                frame_8023_len = ALIGN(frame_8023_len, 4);

                framep += frame_8023_len;
                amsdu_len -= frame_8023_len;
        }

        dev_kfree_skb(skb);
}

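/*
 * Release in-order frames from the hold queue to the network stack.
 * With order == 1 delivery stops at the first hole: e.g. if seq_next is
 * 100 and the hold queue contains frames 100, 101 and 103, then 100 and
 * 101 are delivered and seq_next advances to 102.
 */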
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
                            u16 seq_no, u8 order)
{
        struct sk_buff *skb;
        struct rxtid *rxtid;
        struct skb_hold_q *node;
        u16 idx, idx_end, seq_end;
        struct rxtid_stats *stats;

        rxtid = &agg_conn->rx_tid[tid];
        stats = &agg_conn->stat[tid];

        spin_lock_bh(&rxtid->lock);
        idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

        /*
         * idx_end is typically the last possible frame in the window,
         * but changes to the given seq_no when a BAR comes. If seq_no
         * is non-zero, we will go up to that and stop.
         * Note: the last seq no in the current window will occupy the
         * same index position as the index just previous to the start.
         * An important point: if win_sz is 7, for a seq_no space of
         * 4095, there would be holes when sequence wraparound occurs.
         * The target should judiciously choose win_sz based on this
         * condition; for 4095, win_sz values of 2, 4, 8 and 16 work
         * fine (with TID_WINDOW_SZ = 2 x win_sz).
         * We must dequeue from "idx" to "idx_end", both inclusive.
         */
        seq_end = seq_no ? seq_no : rxtid->seq_next;
        idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

        do {
                node = &rxtid->hold_q[idx];
                if ((order == 1) && (!node->skb))
                        break;

                if (node->skb) {
                        if (node->is_amsdu)
                                aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
                                                 node->skb);
                        else
                                skb_queue_tail(&rxtid->q, node->skb);
                        node->skb = NULL;
                } else {
                        stats->num_hole++;
                }

                rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
                idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
        } while (idx != idx_end);

        spin_unlock_bh(&rxtid->lock);

        stats->num_delivered += skb_queue_len(&rxtid->q);

        while ((skb = skb_dequeue(&rxtid->q)))
                ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
                                  u16 seq_no,
                                  bool is_amsdu, struct sk_buff *frame)
{
        struct rxtid *rxtid;
        struct rxtid_stats *stats;
        struct sk_buff *skb;
        struct skb_hold_q *node;
        u16 idx, st, cur, end;
        bool is_queued = false;
        u16 extended_end;

        rxtid = &agg_conn->rx_tid[tid];
        stats = &agg_conn->stat[tid];

        stats->num_into_aggr++;

        if (!rxtid->aggr) {
                if (is_amsdu) {
                        aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
                        is_queued = true;
                        stats->num_amsdu++;
                        while ((skb = skb_dequeue(&rxtid->q)))
                                ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
                                                                  skb);
                }
                return is_queued;
        }

        /* Check the incoming sequence no, if it's in the window */
        st = rxtid->seq_next;
        cur = seq_no;
        end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

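        /*
         * Illustrative wraparound case: with hold_q_sz 8 and seq_next
         * 4090, end = (4090 + 7) & 4095 = 1, so st > end and sequence
         * numbers 4090..4095 and 0..1 lie inside the window.
         */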
        if (((st < end) && (cur < st || cur > end)) ||
            ((st > end) && (cur > end) && (cur < st))) {
                extended_end = (end + rxtid->hold_q_sz - 1) &
                        ATH6KL_MAX_SEQ_NO;

                if (((end < extended_end) &&
                     (cur < end || cur > extended_end)) ||
                    ((end > extended_end) && (cur > extended_end) &&
                     (cur < end))) {
                        aggr_deque_frms(agg_conn, tid, 0, 0);
                        spin_lock_bh(&rxtid->lock);
                        if (cur >= rxtid->hold_q_sz - 1)
                                rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
                        else
                                rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
                                                  (rxtid->hold_q_sz - 2 - cur);
                        spin_unlock_bh(&rxtid->lock);
                } else {
                        /*
                         * Dequeue only those frames that are outside the
                         * new shifted window.
                         */
                        if (cur >= rxtid->hold_q_sz - 1)
                                st = cur - (rxtid->hold_q_sz - 1);
                        else
                                st = ATH6KL_MAX_SEQ_NO -
                                        (rxtid->hold_q_sz - 2 - cur);

                        aggr_deque_frms(agg_conn, tid, st, 0);
                }

                stats->num_oow++;
        }

        idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

        node = &rxtid->hold_q[idx];

        spin_lock_bh(&rxtid->lock);

        /*
         * Is the current frame a duplicate, or something beyond our
         * window (hold_q, which is already 2x the window size)?
         *
         * 1. Duplicate is easy - drop the incoming frame.
         * 2. Not falling in the current sliding window:
         *  2a. Is the frame_seq_no preceding the current tid_seq_no?
         *      -> drop the frame; perhaps the sender did not get our ACK.
         *         This is taken care of above.
         *  2b. Is the frame_seq_no beyond window(st, TID_WINDOW_SZ)?
         *      -> Taken care of above, by moving the window forward.
         */
        dev_kfree_skb(node->skb);
        stats->num_dups++;

        node->skb = frame;
        is_queued = true;
        node->is_amsdu = is_amsdu;
        node->seq_no = seq_no;

        if (node->is_amsdu)
                stats->num_amsdu++;
        else
                stats->num_mpdu++;

        spin_unlock_bh(&rxtid->lock);

        aggr_deque_frms(agg_conn, tid, 0, 1);

        if (agg_conn->timer_scheduled)
                return is_queued;

        spin_lock_bh(&rxtid->lock);
        for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
                if (rxtid->hold_q[idx].skb) {
                        /*
                         * There is a frame in the queue and no
                         * timer so start a timer to ensure that
                         * the frame doesn't remain stuck
                         * forever.
                         */
                        agg_conn->timer_scheduled = true;
                        mod_timer(&agg_conn->timer,
                                  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
                        rxtid->timer_mon = true;
                        break;
                }
        }
        spin_unlock_bh(&rxtid->lock);

        return is_queued;
}

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
                                                 struct ath6kl_sta *conn)
{
        struct ath6kl *ar = vif->ar;
        bool is_apsdq_empty, is_apsdq_empty_at_start;
        u32 num_frames_to_deliver, flags;
        struct sk_buff *skb = NULL;

        /*
         * If the APSD q for this STA is not empty, dequeue and
         * send a pkt from the head of the q. Also update the
         * More data bit in the WMI_DATA_HDR if there are
         * more pkts for this STA in the APSD q.
         * If there are no more pkts for this STA,
         * update the APSD bitmap for this STA.
         */

        num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
                                                    ATH6KL_APSD_FRAME_MASK;
        /*
         * The number of frames to send in a service period is
         * indicated by the station in the QOS_INFO of the
         * association request. If it is zero, send all frames.
         */
        if (!num_frames_to_deliver)
                num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

        spin_lock_bh(&conn->psq_lock);
        is_apsdq_empty = skb_queue_empty(&conn->apsdq);
        spin_unlock_bh(&conn->psq_lock);
        is_apsdq_empty_at_start = is_apsdq_empty;

        while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
                spin_lock_bh(&conn->psq_lock);
                skb = skb_dequeue(&conn->apsdq);
                is_apsdq_empty = skb_queue_empty(&conn->apsdq);
                spin_unlock_bh(&conn->psq_lock);

                /*
                 * Set the STA flag to Trigger delivery,
                 * so that the frame will go out
                 */
                conn->sta_flags |= STA_PS_APSD_TRIGGER;
                num_frames_to_deliver--;

                /* Last frame in the service period, set EOSP or queue empty */
                if ((is_apsdq_empty) || (!num_frames_to_deliver))
                        conn->sta_flags |= STA_PS_APSD_EOSP;

                ath6kl_data_tx(skb, vif->ndev);
                conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
                conn->sta_flags &= ~(STA_PS_APSD_EOSP);
        }

        if (is_apsdq_empty) {
                if (is_apsdq_empty_at_start)
                        flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
                else
                        flags = 0;

                ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
                                              vif->fw_vif_idx,
                                              conn->aid, 0, flags);
        }

        return;
}

void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
        struct ath6kl *ar = target->dev->ar;
        struct sk_buff *skb = packet->pkt_cntxt;
        struct wmi_rx_meta_v2 *meta;
        struct wmi_data_hdr *dhdr;
        int min_hdr_len;
        u8 meta_type, dot11_hdr = 0;
        u8 pad_before_data_start;
        int status = packet->status;
        enum htc_endpoint_id ept = packet->endpoint;
        bool is_amsdu, prev_ps, ps_state = false;
        bool trig_state = false;
        struct ath6kl_sta *conn = NULL;
        struct sk_buff *skb1 = NULL;
        struct ethhdr *datap = NULL;
        struct ath6kl_vif *vif;
        struct aggr_info_conn *aggr_conn;
        u16 seq_no, offset;
        u8 tid, if_idx;

        ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
                   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
                   __func__, ar, ept, skb, packet->buf,
                   packet->act_len, status);

        if (status || packet->act_len < HTC_HDR_LENGTH) {
                dev_kfree_skb(skb);
                return;
        }

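        /*
         * packet->act_len excludes the HTC header: extend the skb over
         * the received bytes, then strip the HTC header itself.
         */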
        skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
        skb_pull(skb, HTC_HDR_LENGTH);

        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
                        skb->data, skb->len);

        if (ept == ar->ctrl_ep) {
                if (test_bit(WMI_ENABLED, &ar->flag)) {
                        ath6kl_check_wow_status(ar);
                        ath6kl_wmi_control_rx(ar->wmi, skb);
                        return;
                }
                if_idx = wmi_cmd_hdr_get_if_idx(
                        (struct wmi_cmd_hdr *) skb->data);
        } else {
                if_idx = wmi_data_hdr_get_if_idx(
                        (struct wmi_data_hdr *) skb->data);
        }

        vif = ath6kl_get_vif_by_index(ar, if_idx);
        if (!vif) {
                dev_kfree_skb(skb);
                return;
        }

        /*
         * Take lock to protect buffer counts and adaptive power throughput
         * state.
         */
        spin_lock_bh(&vif->if_lock);

        vif->net_stats.rx_packets++;
        vif->net_stats.rx_bytes += packet->act_len;

        spin_unlock_bh(&vif->if_lock);

        skb->dev = vif->ndev;

        if (!test_bit(WMI_ENABLED, &ar->flag)) {
                if (EPPING_ALIGNMENT_PAD > 0)
                        skb_pull(skb, EPPING_ALIGNMENT_PAD);
                ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
                return;
        }

        ath6kl_check_wow_status(ar);

        min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
                      sizeof(struct ath6kl_llc_snap_hdr);

        dhdr = (struct wmi_data_hdr *) skb->data;

        /*
         * In the case of AP mode we may receive NULL data frames
         * that do not have LLC hdr. They are 16 bytes in size.
         * Allow these frames in the AP mode.
         */
        if (vif->nw_type != AP_NETWORK &&
            ((packet->act_len < min_hdr_len) ||
             (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
                ath6kl_info("frame len is too short or too long\n");
                vif->net_stats.rx_errors++;
                vif->net_stats.rx_length_errors++;
                dev_kfree_skb(skb);
                return;
        }

        /* Get the Power save state of the STA */
        if (vif->nw_type == AP_NETWORK) {
                meta_type = wmi_data_hdr_get_meta(dhdr);

                ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
                              WMI_DATA_HDR_PS_MASK);

                offset = sizeof(struct wmi_data_hdr);
                trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

                switch (meta_type) {
                case 0:
                        break;
                case WMI_META_VERSION_1:
                        offset += sizeof(struct wmi_rx_meta_v1);
                        break;
                case WMI_META_VERSION_2:
                        offset += sizeof(struct wmi_rx_meta_v2);
                        break;
                default:
                        break;
                }

                datap = (struct ethhdr *) (skb->data + offset);
                conn = ath6kl_find_sta(vif, datap->h_source);

                if (!conn) {
                        dev_kfree_skb(skb);
                        return;
                }

                /*
                 * If there is a change in PS state of the STA,
                 * take appropriate steps:
                 *
                 * 1. If Sleep-->Awake, flush the psq for the STA and
                 *    clear the PVB for the STA.
                 * 2. If Awake-->Sleep, start queueing frames for
                 *    the STA.
                 */
1444                prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1445
1446                if (ps_state)
1447                        conn->sta_flags |= STA_PS_SLEEP;
1448                else
1449                        conn->sta_flags &= ~STA_PS_SLEEP;
1450
1451                /* Accept trigger only when the station is in sleep */
1452                if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
1453                        ath6kl_uapsd_trigger_frame_rx(vif, conn);
1454
1455                if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1456                        if (!(conn->sta_flags & STA_PS_SLEEP)) {
1457                                struct sk_buff *skbuff = NULL;
1458                                bool is_apsdq_empty;
1459                                struct ath6kl_mgmt_buff *mgmt;
1460                                u8 idx;
1461
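                                    /*
                                     * psq_lock is dropped around each send
                                     * below since the tx paths may take
                                     * conn->psq_lock themselves if the STA
                                     * flips back to sleep mid-flush.
                                     */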
1462                                spin_lock_bh(&conn->psq_lock);
1463                                while (conn->mgmt_psq_len > 0) {
1464                                        mgmt = list_first_entry(
1465                                                        &conn->mgmt_psq,
1466                                                        struct ath6kl_mgmt_buff,
1467                                                        list);
1468                                        list_del(&mgmt->list);
1469                                        conn->mgmt_psq_len--;
1470                                        spin_unlock_bh(&conn->psq_lock);
1471                                        idx = vif->fw_vif_idx;
1472
1473                                        ath6kl_wmi_send_mgmt_cmd(ar->wmi,
1474                                                                 idx,
1475                                                                 mgmt->id,
1476                                                                 mgmt->freq,
1477                                                                 mgmt->wait,
1478                                                                 mgmt->buf,
1479                                                                 mgmt->len,
1480                                                                 mgmt->no_cck);
1481
1482                                        kfree(mgmt);
1483                                        spin_lock_bh(&conn->psq_lock);
1484                                }
1485                                conn->mgmt_psq_len = 0;
1486                                while ((skbuff = skb_dequeue(&conn->psq))) {
1487                                        spin_unlock_bh(&conn->psq_lock);
1488                                        ath6kl_data_tx(skbuff, vif->ndev);
1489                                        spin_lock_bh(&conn->psq_lock);
1490                                }
1491
1492                                is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1493                                while ((skbuff = skb_dequeue(&conn->apsdq))) {
1494                                        spin_unlock_bh(&conn->psq_lock);
1495                                        ath6kl_data_tx(skbuff, vif->ndev);
1496                                        spin_lock_bh(&conn->psq_lock);
1497                                }
1498                                spin_unlock_bh(&conn->psq_lock);
1499
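                                    /*
                                     * If the apsd queue had frames flushed,
                                     * tell the target there is no longer
                                     * buffered U-APSD traffic for this STA.
                                     */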
1500                                if (!is_apsdq_empty)
1501                                        ath6kl_wmi_set_apsd_bfrd_traf(
1502                                                        ar->wmi,
1503                                                        vif->fw_vif_idx,
1504                                                        conn->aid, 0, 0);
1505
1506                                /* Clear the PVB for this STA */
1507                                ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1508                                                       conn->aid, 0);
1509                        }
1510                }
1511
1512                /* PS state is now processed; drop NULL data and oversized frames */
1513                if ((packet->act_len < min_hdr_len) ||
1514                    (packet->act_len >
1515                     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
1516                        dev_kfree_skb(skb);
1517                        return;
1518                }
1519        }
1520
1521        is_amsdu = !!wmi_data_hdr_is_amsdu(dhdr);
1522        tid = wmi_data_hdr_get_up(dhdr);
1523        seq_no = wmi_data_hdr_get_seqno(dhdr);
1524        meta_type = wmi_data_hdr_get_meta(dhdr);
1525        dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
1526        pad_before_data_start =
1527                (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
1528                        & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
1529
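            /*
             * Strip, in order: the WMI data header, any appended rx meta
             * info, and any pad bytes the target inserted before the
             * payload (pad length decoded from info3 above).
             */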
1530        skb_pull(skb, sizeof(struct wmi_data_hdr));
1531
1532        switch (meta_type) {
1533        case WMI_META_VERSION_1:
1534                skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
1535                break;
1536        case WMI_META_VERSION_2:
1537                meta = (struct wmi_rx_meta_v2 *) skb->data;
                    /* csum_flags bit 0: the target supplied a payload checksum */
1538                if (meta->csum_flags & 0x1) {
1539                        skb->ip_summed = CHECKSUM_COMPLETE;
1540                        skb->csum = (__force __wsum) meta->csum;
1541                }
1542                skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
1543                break;
1544        default:
1545                break;
1546        }
1547
1548        skb_pull(skb, pad_before_data_start);
1549
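            /*
             * Normalise the frame to DIX/Ethernet II: either remove the
             * 802.11 header or rewrite the 802.3 + LLC/SNAP header.
             * A-MSDUs are left untouched here; they are deaggregated
             * further down the rx path.
             */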
1550        if (dot11_hdr)
1551                status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
1552        else if (!is_amsdu)
1553                status = ath6kl_wmi_dot3_2_dix(skb);
1554
1555        if (status) {
1556                /*
1557                 * Drop frames that could not be processed (lack of
1558                 * memory, etc.)
1559                 */
1560                dev_kfree_skb(skb);
1561                return;
1562        }
1563
1564        if (!(vif->ndev->flags & IFF_UP)) {
1565                dev_kfree_skb(skb);
1566                return;
1567        }
1568
1569        if (vif->nw_type == AP_NETWORK) {
1570                datap = (struct ethhdr *) skb->data;
1571                if (is_multicast_ether_addr(datap->h_dest))
1572                        /*
1573                         * Bcast/Mcast frames should be sent to the
1574                         * OS stack as well as on the air.
1575                         */
1576                        skb1 = skb_copy(skb, GFP_ATOMIC);
1577                else {
1578                        /*
1579                         * Search for a connected STA with dstMac
1580                         * as the Mac address. If found send the
1581                         * frame to it on the air else send the
1582                         * frame up the stack.
1583                         */
1584                        conn = ath6kl_find_sta(vif, datap->h_dest);
1585
1586                        if (conn && ar->intra_bss) {
1587                                skb1 = skb;
1588                                skb = NULL;
1589                        } else if (conn && !ar->intra_bss) {
1590                                dev_kfree_skb(skb);
1591                                skb = NULL;
1592                        }
1593                }
1594                if (skb1)
1595                        ath6kl_data_tx(skb1, vif->ndev);
1596
1597                if (!skb) {
1598                        /* nothing to deliver up the stack */
1599                        return;
1600                }
1601        }
1602
1603        datap = (struct ethhdr *) skb->data;
1604
1605        if (is_unicast_ether_addr(datap->h_dest)) {
1606                if (vif->nw_type == AP_NETWORK) {
1607                        conn = ath6kl_find_sta(vif, datap->h_source);
1608                        if (!conn) {
                                    /* unknown source STA: free the skb rather than leak it */
                                    dev_kfree_skb(skb);
1609                                return;
                            }
1610                        aggr_conn = conn->aggr_conn;
1611                } else {
1612                        aggr_conn = vif->aggr_cntxt->aggr_conn;
1613                }
1614
1615                if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
1616                                          is_amsdu, skb)) {
1617                        /* aggregation code will handle the skb */
1618                        return;
1619                }
1620        } else if (!is_broadcast_ether_addr(datap->h_dest)) {
1621                vif->net_stats.multicast++;
1622        }
1623
1624        ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1625}
1626
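    /*
     * Reorder timer handler: flush every aggregating TID that was flagged
     * for monitoring, then walk the hold queues and re-arm the timer if
     * any TID still has frames parked waiting for a sequence-number hole
     * to fill.
     */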
1627static void aggr_timeout(unsigned long arg)
1628{
1629        u8 i, j;
1630        struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
1631        struct rxtid *rxtid;
1632        struct rxtid_stats *stats;
1633
1634        for (i = 0; i < NUM_OF_TIDS; i++) {
1635                rxtid = &aggr_conn->rx_tid[i];
1636                stats = &aggr_conn->stat[i];
1637
1638                if (!rxtid->aggr || !rxtid->timer_mon)
1639                        continue;
1640
1641                stats->num_timeouts++;
1642                ath6kl_dbg(ATH6KL_DBG_AGGR,
1643                           "aggr timeout (st %d end %d)\n",
1644                           rxtid->seq_next,
1645                           ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
1646                            ATH6KL_MAX_SEQ_NO));
1647                aggr_deque_frms(aggr_conn, i, 0, 0);
1648        }
1649
1650        aggr_conn->timer_scheduled = false;
1651
1652        for (i = 0; i < NUM_OF_TIDS; i++) {
1653                rxtid = &aggr_conn->rx_tid[i];
1654
1655                if (rxtid->aggr && rxtid->hold_q) {
1656                        spin_lock_bh(&rxtid->lock);
1657                        for (j = 0; j < rxtid->hold_q_sz; j++) {
1658                                if (rxtid->hold_q[j].skb) {
1659                                        aggr_conn->timer_scheduled = true;
1660                                        rxtid->timer_mon = true;
1661                                        break;
1662                                }
1663                        }
1664                        spin_unlock_bh(&rxtid->lock);
1665
1666                        if (j >= rxtid->hold_q_sz)
1667                                rxtid->timer_mon = false;
1668                }
1669        }
1670
1671        if (aggr_conn->timer_scheduled)
1672                mod_timer(&aggr_conn->timer,
1673                          jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
1674}
1675
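    /*
     * Flush any frames still parked for this TID and release its reorder
     * state, including the hold queue allocation.
     */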
1676static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
1677{
1678        struct rxtid *rxtid;
1679        struct rxtid_stats *stats;
1680
1681        if (!aggr_conn || tid >= NUM_OF_TIDS)
1682                return;
1683
1684        rxtid = &aggr_conn->rx_tid[tid];
1685        stats = &aggr_conn->stat[tid];
1686
1687        if (rxtid->aggr)
1688                aggr_deque_frms(aggr_conn, tid, 0, 0);
1689
1690        rxtid->aggr = false;
1691        rxtid->timer_mon = false;
1692        rxtid->win_sz = 0;
1693        rxtid->seq_next = 0;
1694        rxtid->hold_q_sz = 0;
1695
1696        kfree(rxtid->hold_q);
1697        rxtid->hold_q = NULL;
1698
1699        memset(stats, 0, sizeof(struct rxtid_stats));
1700}
1701
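    /*
     * ADDBA request event from the target: (re)initialise rx aggregation
     * for one TID. The hold queue gets TID_WINDOW_SZ(win_sz) slots; with
     * the driver's doubled-window definition a win_sz of 16 would, for
     * example, yield a 32-slot queue for parking out-of-order frames.
     */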
1702void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
1703                             u8 win_sz)
1704{
1705        struct ath6kl_sta *sta;
1706        struct aggr_info_conn *aggr_conn = NULL;
1707        struct rxtid *rxtid;
1708        struct rxtid_stats *stats;
1709        u16 hold_q_size;
1710        u8 tid, aid;
1711
1712        if (vif->nw_type == AP_NETWORK) {
1713                aid = ath6kl_get_aid(tid_mux);
1714                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1715                if (sta)
1716                        aggr_conn = sta->aggr_conn;
1717        } else {
1718                aggr_conn = vif->aggr_cntxt->aggr_conn;
1719        }
1720
1721        if (!aggr_conn)
1722                return;
1723
1724        tid = ath6kl_get_tid(tid_mux);
1725        if (tid >= NUM_OF_TIDS)
1726                return;
1727
1728        rxtid = &aggr_conn->rx_tid[tid];
1729        stats = &aggr_conn->stat[tid];
1730
            /* an out-of-range win_sz is only logged; the value is still used */
1731        if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
1732                ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
1733                           __func__, win_sz, tid);
1734
1735        if (rxtid->aggr)
1736                aggr_delete_tid_state(aggr_conn, tid);
1737
1738        rxtid->seq_next = seq_no;
1739        hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
1740        rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
1741        if (!rxtid->hold_q)
1742                return;
1743
1744        rxtid->win_sz = win_sz;
1745        rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
            /* leave aggregation disabled if the reorder queue is unexpectedly busy */
1746        if (!skb_queue_empty(&rxtid->q))
1747                return;
1748
1749        rxtid->aggr = true;
1750}
1751
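    /*
     * Initialise per-connection rx aggregation state: the default buffer
     * size, the reorder timeout timer and one reorder context per TID.
     */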
1752void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
1753                    struct aggr_info_conn *aggr_conn)
1754{
1755        struct rxtid *rxtid;
1756        u8 i;
1757
1758        aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
1759        aggr_conn->dev = vif->ndev;
1760        init_timer(&aggr_conn->timer);
1761        aggr_conn->timer.function = aggr_timeout;
1762        aggr_conn->timer.data = (unsigned long) aggr_conn;
1763        aggr_conn->aggr_info = aggr_info;
1764
1765        aggr_conn->timer_scheduled = false;
1766
1767        for (i = 0; i < NUM_OF_TIDS; i++) {
1768                rxtid = &aggr_conn->rx_tid[i];
1769                rxtid->aggr = false;
1770                rxtid->timer_mon = false;
1771                skb_queue_head_init(&rxtid->q);
1772                spin_lock_init(&rxtid->lock);
1773        }
1774}
1775
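    /*
     * Allocate the per-vif aggregation context along with its
     * connection-level state and pre-allocate a pool of buffers for
     * A-MSDU deaggregation.
     */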
1776struct aggr_info *aggr_init(struct ath6kl_vif *vif)
1777{
1778        struct aggr_info *p_aggr = NULL;
1779
1780        p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
1781        if (!p_aggr) {
1782                ath6kl_err("failed to alloc memory for aggr_info\n");
1783                return NULL;
1784        }
1785
1786        p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
1787        if (!p_aggr->aggr_conn) {
1788                ath6kl_err("failed to alloc memory for connection specific aggr info\n");
1789                kfree(p_aggr);
1790                return NULL;
1791        }
1792
1793        aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);
1794
1795        skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
1796        ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);
1797
1798        return p_aggr;
1799}
1800
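    /*
     * DELBA request event from the target: flush and disable rx
     * aggregation for the given TID.
     */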
1801void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
1802{
1803        struct ath6kl_sta *sta;
1804        struct rxtid *rxtid;
1805        struct aggr_info_conn *aggr_conn = NULL;
1806        u8 tid, aid;
1807
1808        if (vif->nw_type == AP_NETWORK) {
1809                aid = ath6kl_get_aid(tid_mux);
1810                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1811                if (sta)
1812                        aggr_conn = sta->aggr_conn;
1813        } else {
1814                aggr_conn = vif->aggr_cntxt->aggr_conn;
1815        }
1816
1817        if (!aggr_conn)
1818                return;
1819
1820        tid = ath6kl_get_tid(tid_mux);
1821        if (tid >= NUM_OF_TIDS)
1822                return;
1823
1824        rxtid = &aggr_conn->rx_tid[tid];
1825
1826        if (rxtid->aggr)
1827                aggr_delete_tid_state(aggr_conn, tid);
1828}
1829
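    /*
     * Cancel the reorder timer if it is armed and tear down aggregation
     * state for every TID, e.g. on disconnect.
     */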
1830void aggr_reset_state(struct aggr_info_conn *aggr_conn)
1831{
1832        u8 tid;
1833
1834        if (!aggr_conn)
1835                return;
1836
1837        if (aggr_conn->timer_scheduled) {
1838                del_timer(&aggr_conn->timer);
1839                aggr_conn->timer_scheduled = false;
1840        }
1841
1842        for (tid = 0; tid < NUM_OF_TIDS; tid++)
1843                aggr_delete_tid_state(aggr_conn, tid);
1844}
1845
1846/* clean up our amsdu buffer list */
1847void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
1848{
1849        struct htc_packet *packet, *tmp_pkt;
1850
1851        spin_lock_bh(&ar->lock);
1852        if (list_empty(&ar->amsdu_rx_buffer_queue)) {
1853                spin_unlock_bh(&ar->lock);
1854                return;
1855        }
1856
1857        list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
1858                                 list) {
1859                list_del(&packet->list);
1860                spin_unlock_bh(&ar->lock);
1861                dev_kfree_skb(packet->pkt_cntxt);
1862                spin_lock_bh(&ar->lock);
1863        }
1864
1865        spin_unlock_bh(&ar->lock);
1866}
1867
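    /*
     * Tear down the context allocated by aggr_init(): reset all TID
     * state, purge the A-MSDU free queue and free the memory.
     */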
1868void aggr_module_destroy(struct aggr_info *aggr_info)
1869{
1870        if (!aggr_info)
1871                return;
1872
1873        aggr_reset_state(aggr_info->aggr_conn);
1874        skb_queue_purge(&aggr_info->rx_amsdu_freeq);
1875        kfree(aggr_info->aggr_conn);
1876        kfree(aggr_info);
1877}
1878