linux/drivers/net/wireless/ath/ath6kl/txrx.c
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"
#include "trace.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}

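/*
 * Map an IBSS transmit to an HTC endpoint. Multicast frames always go
 * out on ENDPOINT_2. Unicast destinations are tracked in ar->node_map so
 * that a peer keeps using one endpoint; when no free endpoint is left,
 * endpoints are handed out round-robin. *map_no returns the 1-based node
 * map slot (0 for multicast).
 */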
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available; start redistribution on
		 * the in-use endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}

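/*
 * U-APSD handling for an AP-mode transmit. Returns true if the skb was
 * consumed by queueing it on the station's APSD queue, false if the
 * caller should send it now (possibly with the More/EOSP/UAPSD flags
 * updated for a trigger-initiated delivery).
 */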
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				struct ath6kl_vif *vif,
				struct sk_buff *skb,
				u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger; determine the More
		 * and EOSP bits. Set EOSP if the queue is empty or enough
		 * frames have been delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info) {
		return false;
	}

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
							(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}

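/*
 * Legacy power-save handling for an AP-mode transmit. For a PS-Poll
 * response, only the MoreData flag is updated and the frame goes out;
 * otherwise the frame is parked on the station's psq and true is
 * returned to indicate the skb was consumed.
 */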
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
				struct ath6kl_vif *vif,
				struct sk_buff *skb,
				u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);
	return true;
}

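/*
 * AP-mode power-save gate for the transmit path. Returns true when the
 * skb was consumed here: queued for sleeping stations (multicast or
 * unicast) or dropped because the destination station is unknown.
 * Returns false when the caller should transmit the frame.
 */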
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first multicast pkt getting
				 * queued, indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of a DTIM expiry.
				 * Determine if the MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
						vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
						vif, skb, flags);
		}
	}
	return ps_queued;
}

/* Tx functions */

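/*
 * Send a WMI control frame over the given HTC endpoint. The skb is
 * always consumed: on success ownership passes to HTC (errors are then
 * handled in the TX completion callback), on failure it is freed here.
 */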
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	trace_ath6kl_wmi_cmd(skb->data, skb->len);

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
			 eid >= ENDPOINT_MAX)) {
		status = -EINVAL;
		goto fail_ctrl_tx;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else {
		cookie = ath6kl_alloc_cookie(ar);
	}

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

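/*
 * Data transmit entry point (ndo_start_xmit). Adds the WMI data header
 * and optional checksum-offload metadata, maps the frame to an AC and
 * HTC endpoint, and hands it to HTC. Always returns 0 (NETDEV_TX_OK);
 * dropped frames are accounted in the netdev stats.
 */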
netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_cow_head(skb, dev->needed_headroom)) {
			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return 0;
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
				DATA_MSGTYPE, flags, 0,
				meta_ver,
				meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n", ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags)) {
			chk_adhoc_ps_mapping = true;
		} else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else {
		goto fail_tx;
	}

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	dev->stats.tx_dropped++;
	dev->stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
			ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

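/*
 * HTC send-queue-full hook. Overflow on the control endpoint is treated
 * as a fatal firmware condition. For data endpoints, packets of streams
 * below the highest active priority may be dropped once the cookie pool
 * runs low, and the netif queues are stopped where appropriate.
 */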
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant; the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

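/*
 * Drop the tx_pend reference taken in ath6kl_ibss_map_epid() and trim
 * idle entries from the tail of the IBSS node map.
 */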
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

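/*
 * HTC TX completion handler: reaps completed packets, updates pending
 * counters and per-vif statistics, recycles cookies and wakes the netif
 * queues of connected vifs that are not being flushed.
 */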
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
				 packet->endpoint >= ENDPOINT_MAX))
			continue;

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->ndev->stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->ndev->stats.tx_packets++;
			vif->ndev->stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

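/*
 * Hand a received frame to the network stack. netif_rx_ni() is used
 * here since delivery happens outside hard-interrupt context.
 */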
static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

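/* Fill a queue with up to 'num' freshly allocated receive buffers. */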
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

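/*
 * Take a buffer from the A-MSDU free list, topping the list up first
 * when it has dropped below a quarter of AGGR_NUM_OF_FREE_NETBUFS.
 */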
static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}

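/*
 * Top up the receive buffers for an endpoint to ATH6KL_MAX_RX_BUFFERS,
 * 4-byte aligning each buffer's data pointer before handing the batch
 * to HTC.
 */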
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);

			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

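/*
 * Pre-allocate 'count' maximum-size (A-MSDU) receive buffers and add
 * them to the common amsdu_rx_buffer_queue.
 */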
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);

			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

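/*
 * Split a received A-MSDU into its 802.3 subframes, convert each to
 * DIX format and queue it on the tid's reorder queue. The original skb
 * is always freed.
 */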
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = be16_to_cpu(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/*
		 * Add the length of the A-MSDU subframe padding bytes:
		 * round up to a 4-byte word boundary.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}

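/*
 * Release frames from the reorder hold queue and deliver them to the
 * network stack. Dequeuing starts at seq_next and stops at seq_no (or
 * after one full window when seq_no is 0); with 'order' set, it stops
 * at the first hole instead of counting it.
 */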
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the index of the last frame in the window,
	 * but changes to the given seq_no when a BAR comes in. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the same
	 * index position as the index just previous to the start.
	 * An important point: if win_sz is 7, for a seq_no space of 4095,
	 * there would be holes when sequence wrap-around occurs. The target
	 * should judiciously choose the win_sz based on this condition
	 * (for 4095, TID_WINDOW_SZ = 2 x win_sz; win_sz of 2, 4, 8 and 16
	 * works fine).
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else {
			stats->num_hole++;
		}

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

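/*
 * Place a received frame into the per-tid reorder window. Out-of-window
 * sequence numbers force the window forward; in-window frames are held
 * in hold_q until they can be delivered in order, with a timer armed to
 * flush stuck frames. Returns true if the frame was consumed here.
 */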
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the current frame a duplicate, or something beyond our window
	 * (hold_q, which is already 2x the window size)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window:
	 *  2a. Is the frame_seq_no preceding the current tid_seq_no?
	 *      -> drop the frame; perhaps the sender did not get our ACK.
	 *         This is taken care of above.
	 *  2b. Is the frame_seq_no beyond the window (st, TID_WINDOW_SZ)?
	 *      -> Taken care of above, by moving the window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}

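/*
 * Service a U-APSD trigger from a sleeping station: deliver up to the
 * negotiated number of frames from the APSD queue and update the
 * buffered-traffic state once the queue drains.
 */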
static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;
	/*
	 * The number of frames to send in a service period is indicated
	 * by the station in the QOS_INFO of the association request.
	 * If it is zero, send all frames.
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}

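/*
 * HTC RX completion handler. Strips the HTC header, routes WMI control
 * messages to the WMI layer, and performs AP power-save bookkeeping for
 * data frames before they continue up the receive path.
 */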
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || packet->act_len < HTC_HDR_LENGTH) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
		wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
		wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->ndev->stats.rx_packets++;
	vif->ndev->stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have an LLC hdr. They are 16 bytes in size.
	 * Allow these frames in AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->ndev->stats.rx_errors++;
		vif->ndev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr) + pad_before_data_start;
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA and
		 *    clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for the STA.
		 */
1443                prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1444
1445                if (ps_state)
1446                        conn->sta_flags |= STA_PS_SLEEP;
1447                else
1448                        conn->sta_flags &= ~STA_PS_SLEEP;
1449
1450                /* Accept trigger only when the station is in sleep */
1451                if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
1452                        ath6kl_uapsd_trigger_frame_rx(vif, conn);
1453
                if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
                        if (!(conn->sta_flags & STA_PS_SLEEP)) {
                                struct sk_buff *skbuff = NULL;
                                bool is_apsdq_empty;
                                struct ath6kl_mgmt_buff *mgmt;
                                u8 idx;

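                                /*
                                 * The STA just woke up: drain its buffered
                                 * management and data frames. The psq lock
                                 * is dropped around each send and re-taken
                                 * before the queues are examined again.
                                 */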
                                spin_lock_bh(&conn->psq_lock);
                                while (conn->mgmt_psq_len > 0) {
                                        mgmt = list_first_entry(
                                                        &conn->mgmt_psq,
                                                        struct ath6kl_mgmt_buff,
                                                        list);
                                        list_del(&mgmt->list);
                                        conn->mgmt_psq_len--;
                                        spin_unlock_bh(&conn->psq_lock);
                                        idx = vif->fw_vif_idx;

                                        ath6kl_wmi_send_mgmt_cmd(ar->wmi,
                                                                 idx,
                                                                 mgmt->id,
                                                                 mgmt->freq,
                                                                 mgmt->wait,
                                                                 mgmt->buf,
                                                                 mgmt->len,
                                                                 mgmt->no_cck);

                                        kfree(mgmt);
                                        spin_lock_bh(&conn->psq_lock);
                                }
                                conn->mgmt_psq_len = 0;
                                while ((skbuff = skb_dequeue(&conn->psq))) {
                                        spin_unlock_bh(&conn->psq_lock);
                                        ath6kl_data_tx(skbuff, vif->ndev);
                                        spin_lock_bh(&conn->psq_lock);
                                }

                                is_apsdq_empty = skb_queue_empty(&conn->apsdq);
                                while ((skbuff = skb_dequeue(&conn->apsdq))) {
                                        spin_unlock_bh(&conn->psq_lock);
                                        ath6kl_data_tx(skbuff, vif->ndev);
                                        spin_lock_bh(&conn->psq_lock);
                                }
                                spin_unlock_bh(&conn->psq_lock);

                                if (!is_apsdq_empty)
                                        ath6kl_wmi_set_apsd_bfrd_traf(
                                                        ar->wmi,
                                                        vif->fw_vif_idx,
                                                        conn->aid, 0, 0);

                                /* Clear the PVB for this STA */
                                ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
                                                       conn->aid, 0);
                        }
                }

                /*
                 * Drop NULL data frames (and any other frames with an
                 * out-of-range length) here, now that the PS state of
                 * the STA has been recorded.
                 */
                if ((packet->act_len < min_hdr_len) ||
                    (packet->act_len >
                     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
                        dev_kfree_skb(skb);
                        return;
                }
        }

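        /*
         * From here on the frame is reduced to a plain Ethernet frame:
         * the WMI data header, the optional rx meta data and the pad
         * bytes are stripped in turn below.
         */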
        is_amsdu = !!wmi_data_hdr_is_amsdu(dhdr);
        tid = wmi_data_hdr_get_up(dhdr);
        seq_no = wmi_data_hdr_get_seqno(dhdr);
        meta_type = wmi_data_hdr_get_meta(dhdr);
        dot11_hdr = wmi_data_hdr_get_dot11(dhdr);

        skb_pull(skb, sizeof(struct wmi_data_hdr));

        switch (meta_type) {
        case WMI_META_VERSION_1:
                skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
                break;
        case WMI_META_VERSION_2:
                meta = (struct wmi_rx_meta_v2 *) skb->data;
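                /*
                 * The target computed a checksum over the packet: pass
                 * it on via CHECKSUM_COMPLETE so the stack can verify
                 * against it instead of recomputing in software.
                 */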
                if (meta->csum_flags & 0x1) {
                        skb->ip_summed = CHECKSUM_COMPLETE;
                        skb->csum = (__force __wsum) meta->csum;
                }
                skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
                break;
        default:
                break;
        }

        skb_pull(skb, pad_before_data_start);

        if (dot11_hdr)
                status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
        else if (!is_amsdu)
                status = ath6kl_wmi_dot3_2_dix(skb);

        if (status) {
                /*
                 * Drop frames that could not be processed (lack of
                 * memory, etc.)
                 */
                dev_kfree_skb(skb);
                return;
        }

        if (!(vif->ndev->flags & IFF_UP)) {
                dev_kfree_skb(skb);
                return;
        }

        if (vif->nw_type == AP_NETWORK) {
                datap = (struct ethhdr *) skb->data;
                if (is_multicast_ether_addr(datap->h_dest))
                        /*
                         * Bcast/Mcast frames should be sent to the
                         * OS stack as well as on the air.
                         */
                        skb1 = skb_copy(skb, GFP_ATOMIC);
                else {
                        /*
                         * Search for a connected STA whose MAC address
                         * matches the destination. If one is found,
                         * forward the frame to it over the air;
                         * otherwise send it up the stack.
                         */
                        conn = ath6kl_find_sta(vif, datap->h_dest);

                        if (conn && ar->intra_bss) {
                                skb1 = skb;
                                skb = NULL;
                        } else if (conn && !ar->intra_bss) {
                                dev_kfree_skb(skb);
                                skb = NULL;
                        }
                }
                if (skb1)
                        ath6kl_data_tx(skb1, vif->ndev);

                if (!skb) {
                        /* nothing to deliver up the stack */
                        return;
                }
        }

        datap = (struct ethhdr *) skb->data;

        if (is_unicast_ether_addr(datap->h_dest)) {
                if (vif->nw_type == AP_NETWORK) {
                        conn = ath6kl_find_sta(vif, datap->h_source);
                        if (!conn) {
                                /* unknown source STA: free the skb, don't leak it */
                                dev_kfree_skb(skb);
                                return;
                        }
                        aggr_conn = conn->aggr_conn;
                } else {
                        aggr_conn = vif->aggr_cntxt->aggr_conn;
                }

                if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
                                          is_amsdu, skb)) {
                        /* aggregation code will handle the skb */
                        return;
                }
        } else if (!is_broadcast_ether_addr(datap->h_dest)) {
                vif->ndev->stats.multicast++;
        }

        ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

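/*
 * Illustrative sketch (not driver code): the 802.3/LLC/SNAP to DIX
 * (Ethernet II) rewrite that ath6kl_wmi_dot3_2_dix() performs on the
 * skb in ath6kl_rx() above. An 802.3 frame carries a length field
 * followed by an 8-byte LLC/SNAP header whose last two bytes hold the
 * real EtherType; DIX framing drops LLC/SNAP and places the EtherType
 * directly in the MAC header. The struct below is an assumption made
 * for illustration and is not the driver's ath6kl_llc_snap_hdr.
 */
struct llc_snap_sketch {
        u8 dsap, ssap, cntl;
        u8 org_code[3];
        __be16 eth_type;
} __packed;

static void __maybe_unused dot3_2_dix_sketch(u8 *frame)
{
        struct ethhdr *eth = (struct ethhdr *) frame;
        struct llc_snap_sketch *llc =
                (struct llc_snap_sketch *) (frame + sizeof(*eth));
        struct ethhdr dix;

        /* Keep the MAC addresses, take the EtherType from LLC/SNAP. */
        memcpy(dix.h_dest, eth->h_dest, ETH_ALEN);
        memcpy(dix.h_source, eth->h_source, ETH_ALEN);
        dix.h_proto = llc->eth_type;

        /*
         * Write the DIX header over the tail of the LLC/SNAP bytes so
         * it ends exactly where the payload starts; the caller then
         * advances its data pointer (skb_pull()) by sizeof(*llc).
         */
        memcpy(frame + sizeof(*llc), &dix, sizeof(dix));
}
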
static void aggr_timeout(struct timer_list *t)
{
        u8 i, j;
        struct aggr_info_conn *aggr_conn = from_timer(aggr_conn, t, timer);
        struct rxtid *rxtid;
        struct rxtid_stats *stats;

        for (i = 0; i < NUM_OF_TIDS; i++) {
                rxtid = &aggr_conn->rx_tid[i];
                stats = &aggr_conn->stat[i];

                if (!rxtid->aggr || !rxtid->timer_mon)
                        continue;

                stats->num_timeouts++;
                ath6kl_dbg(ATH6KL_DBG_AGGR,
                           "aggr timeout (st %d end %d)\n",
                           rxtid->seq_next,
                           ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
                            ATH6KL_MAX_SEQ_NO));
                aggr_deque_frms(aggr_conn, i, 0, 0);
        }

        aggr_conn->timer_scheduled = false;

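        /*
         * Second pass: if any TID still has a frame parked in its hold
         * queue, keep monitoring that TID and re-arm the timer below so
         * the stragglers are eventually flushed.
         */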
        for (i = 0; i < NUM_OF_TIDS; i++) {
                rxtid = &aggr_conn->rx_tid[i];

                if (rxtid->aggr && rxtid->hold_q) {
                        spin_lock_bh(&rxtid->lock);
                        for (j = 0; j < rxtid->hold_q_sz; j++) {
                                if (rxtid->hold_q[j].skb) {
                                        aggr_conn->timer_scheduled = true;
                                        rxtid->timer_mon = true;
                                        break;
                                }
                        }
                        spin_unlock_bh(&rxtid->lock);

                        if (j >= rxtid->hold_q_sz)
                                rxtid->timer_mon = false;
                }
        }

        if (aggr_conn->timer_scheduled)
                mod_timer(&aggr_conn->timer,
                          jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
        struct rxtid *rxtid;
        struct rxtid_stats *stats;

        if (!aggr_conn || tid >= NUM_OF_TIDS)
                return;

        rxtid = &aggr_conn->rx_tid[tid];
        stats = &aggr_conn->stat[tid];

        if (rxtid->aggr)
                aggr_deque_frms(aggr_conn, tid, 0, 0);

        rxtid->aggr = false;
        rxtid->timer_mon = false;
        rxtid->win_sz = 0;
        rxtid->seq_next = 0;
        rxtid->hold_q_sz = 0;

        kfree(rxtid->hold_q);
        rxtid->hold_q = NULL;

        memset(stats, 0, sizeof(struct rxtid_stats));
}

void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
                             u8 win_sz)
{
        struct ath6kl_sta *sta;
        struct aggr_info_conn *aggr_conn = NULL;
        struct rxtid *rxtid;
        u16 hold_q_size;
        u8 tid, aid;

        if (vif->nw_type == AP_NETWORK) {
                aid = ath6kl_get_aid(tid_mux);
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
        } else {
                aggr_conn = vif->aggr_cntxt->aggr_conn;
        }

        if (!aggr_conn)
                return;

        tid = ath6kl_get_tid(tid_mux);
        if (tid >= NUM_OF_TIDS)
                return;

        rxtid = &aggr_conn->rx_tid[tid];

        if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
                ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
                           __func__, win_sz, tid);

        if (rxtid->aggr)
                aggr_delete_tid_state(aggr_conn, tid);

        rxtid->seq_next = seq_no;
        hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
        rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
        if (!rxtid->hold_q)
                return;

        rxtid->win_sz = win_sz;
        rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
        if (!skb_queue_empty(&rxtid->q))
                return;

        rxtid->aggr = true;
}

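/*
 * Sketch (an assumption for illustration, not driver code): the hold
 * queue allocated above behaves as a circular reorder buffer of
 * TID_WINDOW_SZ(win_sz) slots, so an incoming sequence number can be
 * mapped to a slot with a simple modulo.
 */
static inline u16 hold_q_slot_sketch(u16 seq_no, u16 hold_q_sz)
{
        return seq_no % hold_q_sz;
}
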
void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
                    struct aggr_info_conn *aggr_conn)
{
        struct rxtid *rxtid;
        u8 i;

        aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
        aggr_conn->dev = vif->ndev;
        timer_setup(&aggr_conn->timer, aggr_timeout, 0);
        aggr_conn->aggr_info = aggr_info;

        aggr_conn->timer_scheduled = false;

        for (i = 0; i < NUM_OF_TIDS; i++) {
                rxtid = &aggr_conn->rx_tid[i];
                rxtid->aggr = false;
                rxtid->timer_mon = false;
                skb_queue_head_init(&rxtid->q);
                spin_lock_init(&rxtid->lock);
        }
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
        struct aggr_info *p_aggr;

        p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
        if (!p_aggr) {
                ath6kl_err("failed to alloc memory for aggr_node\n");
                return NULL;
        }

        p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
        if (!p_aggr->aggr_conn) {
                ath6kl_err("failed to alloc memory for connection specific aggr info\n");
                kfree(p_aggr);
                return NULL;
        }

        aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

        skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
        ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

        return p_aggr;
}
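
/*
 * Note (an inference from the code in this file, not a statement from
 * the original sources): aggr_init() builds the per-vif aggregation
 * context, while aggr_conn_init() stays callable on its own so the
 * per-STA aggr_info_conn contexts used in AP mode (see the
 * sta->aggr_conn lookups above) can be initialized the same way.
 */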

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
        struct ath6kl_sta *sta;
        struct rxtid *rxtid;
        struct aggr_info_conn *aggr_conn = NULL;
        u8 tid, aid;

        if (vif->nw_type == AP_NETWORK) {
                aid = ath6kl_get_aid(tid_mux);
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
        } else {
                aggr_conn = vif->aggr_cntxt->aggr_conn;
        }

        if (!aggr_conn)
                return;

        tid = ath6kl_get_tid(tid_mux);
        if (tid >= NUM_OF_TIDS)
                return;

        rxtid = &aggr_conn->rx_tid[tid];

        if (rxtid->aggr)
                aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
        u8 tid;

        if (!aggr_conn)
                return;

        if (aggr_conn->timer_scheduled) {
                del_timer(&aggr_conn->timer);
                aggr_conn->timer_scheduled = false;
        }

        for (tid = 0; tid < NUM_OF_TIDS; tid++)
                aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
        struct htc_packet *packet, *tmp_pkt;

        spin_lock_bh(&ar->lock);
        if (list_empty(&ar->amsdu_rx_buffer_queue)) {
                spin_unlock_bh(&ar->lock);
                return;
        }

        list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
                                 list) {
                list_del(&packet->list);
                spin_unlock_bh(&ar->lock);
                dev_kfree_skb(packet->pkt_cntxt);
                spin_lock_bh(&ar->lock);
        }

        spin_unlock_bh(&ar->lock);
}
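
/*
 * Note (descriptive, based on the code above): each packet is unlinked
 * under ar->lock, but the lock is released around dev_kfree_skb() and
 * re-taken before the iteration continues; the _safe iterator holds a
 * pointer to the next entry, so the list_del() does not break the walk.
 */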

void aggr_module_destroy(struct aggr_info *aggr_info)
{
        if (!aggr_info)
                return;

        aggr_reset_state(aggr_info->aggr_conn);
        skb_queue_purge(&aggr_info->rx_amsdu_freeq);
        kfree(aggr_info->aggr_conn);
        kfree(aggr_info);
}

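/*
 * Usage sketch (an assumption about the callers, for illustration
 * only): the aggregation context is paired with a vif for its
 * lifetime.
 *
 *      vif->aggr_cntxt = aggr_init(vif);
 *      ...
 *      aggr_module_destroy(vif->aggr_cntxt);
 */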