linux/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"
#include "fw-dbg.h"

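/*
 * iwl_mvm_rx_phy_cmd_mq - handle an RX PHY notification on the MQ RX path.
 * Bumps the A-MPDU reference and, when debugfs statistics are enabled,
 * counts aggregated frames.
 */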
void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
        mvm->ampdu_ref++;

#ifdef CONFIG_IWLWIFI_DEBUGFS
        if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
                spin_lock(&mvm->drv_stats_lock);
                mvm->drv_rx_stats.ampdu_count++;
                spin_unlock(&mvm->drv_stats_lock);
        }
#endif
}

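/*
 * iwl_mvm_check_pn - replay check for hw-decrypted CCMP/GCMP unicast data.
 * Compares the packet number in the frame against the last PN recorded for
 * this station/TID on this RX queue. Returns 0 if the frame may be passed
 * to mac80211 and -1 if it must be dropped.
 */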
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
                                   int queue, struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvmsta;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
        struct iwl_mvm_key_pn *ptk_pn;
        u8 tid, keyidx;
        u8 pn[IEEE80211_CCMP_PN_LEN];
        u8 *extiv;

        /* do PN checking */

        /* multicast and non-data frames only arrive on the default queue */
        if (!ieee80211_is_data(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1))
                return 0;

        /* do not check PN for open AP */
        if (!(stats->flag & RX_FLAG_DECRYPTED))
                return 0;

        /*
         * avoid checking on the default queue - we don't want to replicate
         * all the logic that's necessary for checking the PN on fragmented
         * frames; leave that to mac80211
         */
        if (queue == 0)
                return 0;

        /* if we got here, this is for sure either CCMP or GCMP */
        if (IS_ERR_OR_NULL(sta)) {
                IWL_ERR(mvm,
                        "expected hw-decrypted unicast frame for station\n");
                return -1;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
        keyidx = extiv[3] >> 6;

        ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
        if (!ptk_pn)
                return -1;

        if (ieee80211_is_data_qos(hdr->frame_control))
                tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
        else
                tid = 0;

        /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
        if (tid >= IWL_MAX_TID_COUNT)
                return -1;

        /* load pn */
        pn[0] = extiv[7];
        pn[1] = extiv[6];
        pn[2] = extiv[5];
        pn[3] = extiv[4];
        pn[4] = extiv[1];
        pn[5] = extiv[0];

        if (memcmp(pn, ptk_pn->q[queue].pn[tid],
                   IEEE80211_CCMP_PN_LEN) <= 0)
                return -1;

        memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
        stats->flag |= RX_FLAG_PN_VALIDATED;

        return 0;
}

/* iwl_mvm_create_skb - adds the rxb to a new skb */
static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
                               u16 len, u8 crypt_len,
                               struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
        unsigned int headlen, fraglen, pad_len = 0;
        unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);

        if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
                pad_len = 2;
        len -= pad_len;

        /* If frame is small enough to fit in skb->head, pull it completely.
         * If not, only pull ieee80211_hdr (including crypto if present, and
         * an additional 8 bytes for SNAP/ethertype, see below) so that
         * splice() or TCP coalesce are more efficient.
         *
         * Since, in addition, ieee80211_data_to_8023() always pulls in at
         * least 8 bytes (possibly more for mesh) we can do the same here
         * to save the cost of doing it later. That still doesn't pull in
         * the actual IP header since the typical case has a SNAP header.
         * If the latter changes (there are efforts in the standards group
         * to do so) we should revisit this and ieee80211_data_to_8023().
         */
        headlen = (len <= skb_tailroom(skb)) ? len :
                                               hdrlen + crypt_len + 8;

        /* The firmware may align the packet to DWORD.
         * The padding is inserted after the IV.
         * After copying the header + IV, skip the padding if
         * present before copying packet data.
         */
        hdrlen += crypt_len;
        memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
        memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len,
               headlen - hdrlen);

        fraglen = len - headlen;

        if (fraglen) {
                int offset = (void *)hdr + headlen + pad_len -
                             rxb_addr(rxb) + rxb_offset(rxb);

                skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
                                fraglen, rxb->truesize);
        }
}

/* iwl_mvm_pass_packet_to_mac80211 - passes the packet to mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
                                            struct napi_struct *napi,
                                            struct sk_buff *skb, int queue,
                                            struct ieee80211_sta *sta)
{
        if (iwl_mvm_check_pn(mvm, skb, queue, sta))
                kfree_skb(skb);
        else
                ieee80211_rx_napi(mvm->hw, skb, napi);
}

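/*
 * iwl_mvm_get_signal_strength - fill in the RSSI fields of the RX status.
 * Converts the per-chain energy reported in the MPDU descriptor to dBm
 * values and uses the strongest chain as the overall signal level.
 */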
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
                                        struct iwl_rx_mpdu_desc *desc,
                                        struct ieee80211_rx_status *rx_status)
{
        int energy_a, energy_b, max_energy;

        energy_a = desc->energy_a;
        energy_a = energy_a ? -energy_a : S8_MIN;
        energy_b = desc->energy_b;
        energy_b = energy_b ? -energy_b : S8_MIN;
        max_energy = max(energy_a, energy_b);

        IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
                        energy_a, energy_b, max_energy);

        rx_status->signal = max_energy;
        rx_status->chains = 0; /* TODO: phy info */
        rx_status->chain_signal[0] = energy_a;
        rx_status->chain_signal[1] = energy_b;
        rx_status->chain_signal[2] = S8_MIN;
}

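/*
 * iwl_mvm_rx_crypto - check the hw decryption status of a protected frame.
 * Sets RX_FLAG_DECRYPTED and the crypto header length on success; returns
 * 0 if the frame should be processed further and -1 if it must be dropped
 * (e.g. MIC/ICV failure).
 */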
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                             struct ieee80211_rx_status *stats,
                             struct iwl_rx_mpdu_desc *desc, int queue,
                             u8 *crypt_len)
{
        u16 status = le16_to_cpu(desc->status);

        if (!ieee80211_has_protected(hdr->frame_control) ||
            (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
            IWL_RX_MPDU_STATUS_SEC_NONE)
                return 0;

        /* TODO: handle packets encrypted with unknown alg */

        switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
        case IWL_RX_MPDU_STATUS_SEC_CCM:
        case IWL_RX_MPDU_STATUS_SEC_GCM:
                BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
                /* alg is CCM or GCM: check MIC only */
                if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
                        return -1;

                stats->flag |= RX_FLAG_DECRYPTED;
                *crypt_len = IEEE80211_CCMP_HDR_LEN;
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_TKIP:
                /* Don't drop the frame and decrypt it in SW */
                if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
                        return 0;

                *crypt_len = IEEE80211_TKIP_IV_LEN;
                /* fall through if TTAK OK */
        case IWL_RX_MPDU_STATUS_SEC_WEP:
                if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
                        return -1;

                stats->flag |= RX_FLAG_DECRYPTED;
                if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
                                IWL_RX_MPDU_STATUS_SEC_WEP)
                        *crypt_len = IEEE80211_WEP_IV_LEN;
                return 0;
        case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
                if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
                        return -1;
                stats->flag |= RX_FLAG_DECRYPTED;
                return 0;
        default:
                IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
        }

        return 0;
}

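/*
 * iwl_mvm_rx_csum - propagate the hw checksum verdict to the skb.
 * Marks the skb CHECKSUM_UNNECESSARY when the interface has RXCSUM enabled
 * and the firmware validated both the IP and TCP/UDP checksums.
 */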
static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
                            struct sk_buff *skb,
                            struct iwl_rx_mpdu_desc *desc)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

        if (mvmvif->features & NETIF_F_RXCSUM &&
            desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
            desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/*
 * returns true if a packet outside BA session is a duplicate and
 * should be dropped
 */
static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
                                  struct ieee80211_rx_status *rx_status,
                                  struct ieee80211_hdr *hdr,
                                  struct iwl_rx_mpdu_desc *desc)
{
        struct iwl_mvm_sta *mvm_sta;
        struct iwl_mvm_rxq_dup_data *dup_data;
        u8 baid, tid, sub_frame_idx;

        if (WARN_ON(IS_ERR_OR_NULL(sta)))
                return false;

        baid = (le32_to_cpu(desc->reorder_data) &
                IWL_RX_MPDU_REORDER_BAID_MASK) >>
                IWL_RX_MPDU_REORDER_BAID_SHIFT;

        if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
                return false;

        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        dup_data = &mvm_sta->dup_data[queue];

        /*
         * Drop duplicate 802.11 retransmissions
         * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
         */
        if (ieee80211_is_ctl(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1)) {
                rx_status->flag |= RX_FLAG_DUP_VALIDATED;
                return false;
        }

        if (ieee80211_is_data_qos(hdr->frame_control))
                /* frame has qos control */
                tid = *ieee80211_get_qos_ctl(hdr) &
                        IEEE80211_QOS_CTL_TID_MASK;
        else
                tid = IWL_MAX_TID_COUNT;

        /* If this wasn't part of an A-MSDU, the sub-frame index will be 0 */
        sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

        if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
                     dup_data->last_seq[tid] == hdr->seq_ctrl &&
                     dup_data->last_sub_frame[tid] >= sub_frame_idx))
                return true;

        dup_data->last_seq[tid] = hdr->seq_ctrl;
        dup_data->last_sub_frame[tid] = sub_frame_idx;

        rx_status->flag |= RX_FLAG_DUP_VALIDATED;

        return false;
}

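/*
 * iwl_mvm_notify_rx_queue - send an internal notification to the RX queues.
 * Builds an iwl_rxq_sync_cmd carrying @data and sends it to the firmware
 * via TRIGGER_RX_QUEUES_NOTIF_CMD, targeting the queues in @rxq_mask.
 */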
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
                            const u8 *data, u32 count)
{
        struct iwl_rxq_sync_cmd *cmd;
        u32 data_size = sizeof(*cmd) + count;
        int ret;

        /* should be DWORD aligned */
        if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
                return -EINVAL;

        cmd = kzalloc(data_size, GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        cmd->rxq_mask = cpu_to_le32(rxq_mask);
        cmd->count = cpu_to_le32(count);
        cmd->flags = 0;
        memcpy(cmd->payload, data, count);

        ret = iwl_mvm_send_cmd_pdu(mvm,
                                   WIDE_ID(DATA_PATH_GROUP,
                                           TRIGGER_RX_QUEUES_NOTIF_CMD),
                                   0, data_size, cmd);

        kfree(cmd);
        return ret;
}

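/*
 * iwl_mvm_rx_queue_notif - handle an RX queue sync notification.
 * Dispatches the internal notification carried in the payload; only
 * IWL_MVM_RXQ_NOTIF_DEL_BA is recognized so far (not yet implemented).
 */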
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                            int queue)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rxq_sync_notification *notif;
        struct iwl_mvm_internal_rxq_notif *internal_notif;

        notif = (void *)pkt->data;
        internal_notif = (void *)notif->payload;

        switch (internal_notif->type) {
        case IWL_MVM_RXQ_NOTIF_DEL_BA:
                /* TODO */
                break;
        default:
                WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
        }
}

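/*
 * iwl_mvm_rx_mpdu_mq - handle a received MPDU on the multi-queue RX path.
 * Validates decryption and duplicates, fills in the mac80211 RX status
 * (timestamps, band, signal, rate) and hands the frame to mac80211.
 */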
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        struct iwl_rx_cmd_buffer *rxb, int queue)
{
        struct ieee80211_rx_status *rx_status;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
        struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
        u32 len = le16_to_cpu(desc->mpdu_len);
        u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
        struct ieee80211_sta *sta = NULL;
        struct sk_buff *skb;
        u8 crypt_len = 0;

        /* Don't use dev_alloc_skb(), we'll have enough headroom once
         * the ieee80211_hdr is pulled.
         */
        skb = alloc_skb(128, GFP_ATOMIC);
        if (!skb) {
                IWL_ERR(mvm, "alloc_skb failed\n");
                return;
        }

        rx_status = IEEE80211_SKB_RXCB(skb);

        if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) {
                kfree_skb(skb);
                return;
        }

        /*
         * Keep packets with CRC errors (and with overrun) for monitor mode
         * (otherwise the firmware discards them) but mark them as bad.
         */
        if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
            !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
                IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
                             le16_to_cpu(desc->status));
                rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
        }

        rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
        rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
        rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ :
                                               IEEE80211_BAND_2GHZ;
        rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
                                                         rx_status->band);
        iwl_mvm_get_signal_strength(mvm, desc, rx_status);
        /* TSF as indicated by the firmware is at INA time */
        rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;

        rcu_read_lock();

        if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
                u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;

                if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
                        sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
                        if (IS_ERR(sta))
                                sta = NULL;
                }
        } else if (!is_multicast_ether_addr(hdr->addr2)) {
                /*
                 * This is fine since we prevent two stations with the same
                 * address from being added.
                 */
                sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
        }

        if (sta) {
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

                /*
                 * We have tx blocked stations (with CS bit). If we heard
                 * frames from a blocked station on a new channel we can
                 * TX to it again.
                 */
                if (unlikely(mvm->csa_tx_block_bcn_timeout))
                        iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);

                rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);

                if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
                    ieee80211_is_beacon(hdr->frame_control)) {
                        struct iwl_fw_dbg_trigger_tlv *trig;
                        struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
                        bool trig_check;
                        s32 rssi;

                        trig = iwl_fw_dbg_get_trigger(mvm->fw,
                                                      FW_DBG_TRIGGER_RSSI);
                        rssi_trig = (void *)trig->data;
                        rssi = le32_to_cpu(rssi_trig->rssi);

                        trig_check =
                                iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
                                                              trig);
                        if (trig_check && rx_status->signal < rssi)
                                iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
                }

                /* TODO: multi queue TCM */

                if (ieee80211_is_data(hdr->frame_control))
                        iwl_mvm_rx_csum(sta, skb, desc);

                if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
                        kfree_skb(skb);
                        rcu_read_unlock();
                        return;
                }

                /*
                 * Our hardware de-aggregates AMSDUs but copies the mac header
                 * as is to the de-aggregated MPDUs. We need to turn off the
                 * AMSDU bit in the QoS control ourselves.
                 */
                if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
                    !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
                        u8 *qc = ieee80211_get_qos_ctl(hdr);

                        *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
                }
        }

        /*
         * TODO: PHY info.
         * Verify we don't have the information in the MPDU descriptor and
         * that it is not needed.
         * Make sure for monitor mode that we are on default queue, update
         * ampdu_ref and the rest of phy info then
         */

        /* Set up the HT phy flags */
        switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
        case RATE_MCS_CHAN_WIDTH_20:
                break;
        case RATE_MCS_CHAN_WIDTH_40:
                rx_status->flag |= RX_FLAG_40MHZ;
                break;
        case RATE_MCS_CHAN_WIDTH_80:
                rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
                break;
        case RATE_MCS_CHAN_WIDTH_160:
                rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
                break;
        }
        if (rate_n_flags & RATE_MCS_SGI_MSK)
                rx_status->flag |= RX_FLAG_SHORT_GI;
        if (rate_n_flags & RATE_HT_MCS_GF_MSK)
                rx_status->flag |= RX_FLAG_HT_GF;
        if (rate_n_flags & RATE_MCS_LDPC_MSK)
                rx_status->flag |= RX_FLAG_LDPC;
        if (rate_n_flags & RATE_MCS_HT_MSK) {
                u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
                                RATE_MCS_STBC_POS;
                rx_status->flag |= RX_FLAG_HT;
                rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
                rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
        } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
                u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
                                RATE_MCS_STBC_POS;
                rx_status->vht_nss =
                        ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
                                                RATE_VHT_MCS_NSS_POS) + 1;
                rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
                rx_status->flag |= RX_FLAG_VHT;
                rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
                if (rate_n_flags & RATE_MCS_BF_MSK)
                        rx_status->vht_flag |= RX_VHT_FLAG_BF;
        } else {
                rx_status->rate_idx =
                        iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
                                                            rx_status->band);
        }

        /* TODO: PHY info - update ampdu queue statistics (for debugfs) */
        /* TODO: PHY info - gscan */

        iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
        iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
        rcu_read_unlock();
}

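/*
 * iwl_mvm_rx_frame_release - handle a frame release notification.
 * Not implemented yet on the multi-queue RX path.
 */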
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
                              struct iwl_rx_cmd_buffer *rxb, int queue)
{
        /* TODO */
}