linux/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * A new version of the ADD_STA command added new fields at the end of the
 * structure. Since the new fields are strictly appended, the old v7 layout is
 * a prefix of the new one, so sending the size of the relevant API's
 * structure is enough to support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
        return iwl_mvm_has_new_rx_api(mvm) ?
                sizeof(struct iwl_mvm_add_sta_cmd) :
                sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

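/*
 * Find the first station ID with no entry in fw_id_to_mac_id. IDs in
 * @reserved_ids are skipped (sta_id 0 is kept for the AP station of a
 * station vif, since d0i3/d3 relies on it). Returns IWL_MVM_STATION_COUNT
 * if no free ID is available.
 */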
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
{
        int sta_id;
        u32 reserved_ids = 0;

        BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
        WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

        lockdep_assert_held(&mvm->mutex);

        /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
        if (iftype != NL80211_IFTYPE_STATION)
                reserved_ids = BIT(0);

        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
        for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
                if (BIT(sta_id) & reserved_ids)
                        continue;

                if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
        }
        return IWL_MVM_STATION_COUNT;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update, unsigned int flags)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd add_sta_cmd = {
                .sta_id = mvm_sta->sta_id,
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
                                                 STA_FLG_MIMO_EN_MSK),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;

        if (!update || (flags & STA_MODIFY_QUEUES)) {
                add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
                memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

                if (flags & STA_MODIFY_QUEUES)
                        add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
        }

        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_160:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_80:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_40:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
                /* fall through */
        case IEEE80211_STA_RX_BW_20:
                if (sta->ht_cap.ht_supported)
                        add_sta_cmd.station_flags |=
                                cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
                break;
        }

        switch (sta->rx_nss) {
        case 1:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case 2:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
                break;
        case 3 ... 8:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
                break;
        }

        switch (sta->smps_mode) {
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
                break;
        case IEEE80211_SMPS_STATIC:
                /* override NSS */
                add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
                break;
        case IEEE80211_SMPS_DYNAMIC:
                add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
                break;
        case IEEE80211_SMPS_OFF:
                /* nothing */
                break;
        }

        if (sta->ht_cap.ht_supported) {
                add_sta_cmd.station_flags_msk |=
                        cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
                                    STA_FLG_AGG_MPDU_DENS_MSK);

                mpdu_dens = sta->ht_cap.ampdu_density;
        }

        if (sta->vht_cap.vht_supported) {
                agg_size = sta->vht_cap.cap &
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                agg_size >>=
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
        } else if (sta->ht_cap.ht_supported) {
                agg_size = sta->ht_cap.ampdu_factor;
        }

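        /*
         * The raw exponent/factor is all the firmware needs: per 802.11, the
         * peer's maximal A-MPDU length is (2^(13 + agg_size) - 1) bytes, so
         * e.g. the largest VHT exponent, 7, allows A-MPDUs of up to
         * 2^20 - 1 = 1048575 bytes.
         */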
        add_sta_cmd.station_flags |=
                cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
        add_sta_cmd.station_flags |=
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "ADD_STA failed\n");
                break;
        }

        return ret;
}

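/*
 * Timer callback for RX BA session inactivity: if frames were still arriving
 * within twice the negotiated BA timeout (in TUs, 1 TU = 1024 usec), just
 * re-arm the timer; otherwise tear the session down via mac80211's offloaded
 * BA teardown.
 */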
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
        struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
        struct iwl_mvm_baid_data *ba_data;
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvm_sta;
        unsigned long timeout;

        rcu_read_lock();

        ba_data = rcu_dereference(*rcu_ptr);

        if (WARN_ON(!ba_data))
                goto unlock;

        if (!ba_data->timeout)
                goto unlock;

        timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&ba_data->session_timer, timeout);
                goto unlock;
        }

        /* Timer expired */
        sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
                                          sta->addr, ba_data->tid);
unlock:
        rcu_read_unlock();
}

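/*
 * Pre-DQA path: statically allocate one free hardware queue per AC for a
 * TDLS peer and enable it, recording the queues in the station's
 * tfd_queue_msk.
 */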
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
                                 struct ieee80211_sta *sta)
{
        unsigned long used_hw_queues;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
        u32 ac;

        lockdep_assert_held(&mvm->mutex);

        used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

        /* Find available queues, and allocate them to the ACs */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                u8 queue = find_first_zero_bit(&used_hw_queues,
                                               mvm->first_agg_queue);

                if (queue >= mvm->first_agg_queue) {
                        IWL_ERR(mvm, "Failed to allocate STA queue\n");
                        return -EBUSY;
                }

                __set_bit(queue, &used_hw_queues);
                mvmsta->hw_queue[ac] = queue;
        }

        /* Found a place for all queues - enable them */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
                                      mvmsta->hw_queue[ac],
                                      iwl_mvm_ac_to_tx_fifo[ac], 0,
                                      wdg_timeout);
                mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
        }

        return 0;
}

static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
                                    struct ieee80211_sta *sta)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        unsigned long sta_msk;
        int i;

        lockdep_assert_held(&mvm->mutex);

        /* disable the TDLS STA-specific queues */
        sta_msk = mvmsta->tfd_queue_msk;
        for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
                iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
                                        bool remove_queue)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        u32 status;
        u8 sta_id;
        int ret;

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return -EINVAL;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        mvmsta->tid_disable_agg |= disable_agg_tids;

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.modify_mask = STA_MODIFY_QUEUES;
        if (disable_agg_tids)
                cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
        if (remove_queue)
                cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
        cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
        cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

        rcu_read_unlock();

        /* Notify FW of queue removal from the STA queues */
        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);

        return ret;
}

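/*
 * Return the bitmap of TIDs mapped to @queue that currently have an active
 * (IWL_AGG_ON) aggregation session.
 */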
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long agg_tids = 0;
        s8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return -EINVAL;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        agg_tids |= BIT(tid);
        }
        spin_unlock_bh(&mvmsta->lock);

        return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long tid_bitmap;
        unsigned long disable_agg_tids = 0;
        u8 sta_id;
        int tid;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        rcu_read_lock();

        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return 0;
        }

        mvmsta = iwl_mvm_sta_from_mac80211(sta);

        spin_lock_bh(&mvmsta->lock);
        /* Unmap MAC queues and TIDs from this queue */
        for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
                if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
        }

        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
        spin_unlock_bh(&mvmsta->lock);

        rcu_read_unlock();

        spin_lock_bh(&mvm->queue_info_lock);
        /* Unmap MAC queues and TIDs from this queue */
        mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
        mvm->queue_info[queue].hw_queue_refcount = 0;
        mvm->queue_info[queue].tid_bitmap = 0;
        spin_unlock_bh(&mvm->queue_info_lock);

        return disable_agg_tids;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                                    unsigned long tfd_queue_mask, u8 ac)
{
        int queue = 0;
        u8 ac_to_queue[IEEE80211_NUM_ACS];
        int i;

        lockdep_assert_held(&mvm->queue_info_lock);

        memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

        /* See what ACs the existing queues for this STA have */
        for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
                /* Only DATA queues can be shared */
                if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
                    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
                        continue;

                /* Don't try and take queues being reconfigured */
                if (mvm->queue_info[i].status ==
                    IWL_MVM_QUEUE_RECONFIGURING)
                        continue;

                ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
        }

        /*
         * The queue to share is chosen only from DATA queues as follows (in
         * descending priority):
         * 1. An AC_BE queue
         * 2. Same AC queue
         * 3. Highest AC queue that is lower than new AC
         * 4. Any existing AC (there always is at least 1 DATA queue)
         */

        /* Priority 1: An AC_BE queue */
        if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BE];
        /* Priority 2: Same AC queue */
        else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[ac];
        /* Priority 3a: If new AC is VO and VI exists - use VI */
        else if (ac == IEEE80211_AC_VO &&
                 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 3b: No BE so only AC less than the new one is BK */
        else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_BK];
        /* Priority 4a: No BE nor BK - use VI if exists */
        else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VI];
        /* Priority 4b: No BE, BK nor VI - use VO if exists */
        else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
                queue = ac_to_queue[IEEE80211_AC_VO];

        /* Make sure queue found (or not) is legal */
        if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
            !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
            (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
                IWL_ERR(mvm, "No DATA queues available to share\n");
                return -ENOSPC;
        }

        /* Make sure the queue isn't in the middle of being reconfigured */
        if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
                IWL_ERR(mvm,
                        "TXQ %d is in the middle of re-config - try again\n",
                        queue);
                return -EBUSY;
        }

        return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * the redirection in such a case; otherwise - if no redirection is required -
 * it does nothing, unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
                               int ac, int ssn, unsigned int wdg_timeout,
                               bool force)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool shared_queue;
        unsigned long mq;
        int ret;

        /*
         * If the AC is lower than current one - FIFO needs to be redirected to
         * the lowest one of the streams in the queue. Check if this is needed
         * here.
         * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
         * value 3 and VO with value 0, so to check if ac X is lower than ac Y
         * we need to check if the numerical value of X is LARGER than of Y.
         */
        spin_lock_bh(&mvm->queue_info_lock);
        if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
                spin_unlock_bh(&mvm->queue_info_lock);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "No redirection needed on TXQ #%d\n",
                                    queue);
                return 0;
        }

        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
        cmd.tid = mvm->queue_info[queue].txq_tid;
        mq = mvm->queue_info[queue].hw_queue_to_mac80211;
        shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
        spin_unlock_bh(&mvm->queue_info_lock);

        IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);

        /* Stop MAC queues and wait for this queue to empty */
        iwl_mvm_stop_mac_queues(mvm, mq);
        ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
        if (ret) {
                IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
                        queue);
                ret = -EIO;
                goto out;
        }

        /* Before redirecting the queue we need to de-activate it */
        iwl_trans_txq_disable(mvm->trans, queue, false);
        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
                        ret);

        /* Make sure the SCD wrptr is correctly set before reconfiguring */
        iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

        /* Update the TID "owner" of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].txq_tid = tid;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

        /* Redirect to lower AC */
        iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
                             cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
                             ssn);

        /* Update AC marking of the queue */
        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].mac80211_ac = ac;
        spin_unlock_bh(&mvm->queue_info_lock);

        /*
         * Mark the queue as shared in the transport if it is shared.
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication of shared
         * queues there.
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
        /* Continue using the MAC queues */
        iwl_mvm_start_mac_queues(mvm, mq);

        return ret;
}

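/*
 * Allocate a TXQ for a TID of a station, in this order of preference: a free
 * MGMT queue for non-QoS/QoS-NDP frames, the station's reserved queue, any
 * free DATA queue, and finally an existing queue shared with other TIDs.
 */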
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid,
                                   struct ieee80211_hdr *hdr)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = iwl_mvm_ac_to_tx_fifo[ac],
                .sta_id = mvmsta->sta_id,
                .tid = tid,
                .frame_limit = IWL_FRAME_LIMIT,
        };
        unsigned int wdg_timeout =
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;
        bool using_inactive_queue = false;
        unsigned long disable_agg_tids = 0;
        enum iwl_mvm_agg_state queue_state;
        bool shared_queue = false;
        int ssn;
        unsigned long tfd_queue_mask;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvmsta->lock);
        tfd_queue_mask = mvmsta->tfd_queue_msk;
        spin_unlock_bh(&mvmsta->lock);

        spin_lock_bh(&mvm->queue_info_lock);

        /*
         * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
         * exists
         */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control)) {
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_MGMT_QUEUE,
                                                IWL_MVM_DQA_MAX_MGMT_QUEUE);
                if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
                        IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
                                            queue);

                /* If no such queue is found, we'll use a DATA queue instead */
        }

        if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
            (mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_RESERVED ||
             mvm->queue_info[mvmsta->reserved_queue].status ==
             IWL_MVM_QUEUE_INACTIVE)) {
                queue = mvmsta->reserved_queue;
                mvm->queue_info[queue].reserved = true;
                IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
        }

        if (queue < 0)
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);

        /*
         * Check if this queue is already allocated but inactive.
         * In such a case, we'll need to first free this queue before enabling
         * it again, so we'll mark it as reserved to make sure no new traffic
         * arrives on it
         */
        if (queue > 0 &&
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
                using_inactive_queue = true;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
                                    queue, mvmsta->sta_id, tid);
        }

        /* No free queue - we'll have to share */
        if (queue <= 0) {
                queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
                if (queue > 0) {
                        shared_queue = true;
                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
                }
        }

        /*
         * Mark TXQ as ready, even though it hasn't been fully configured yet,
         * to make sure no one else takes it.
         * This will allow avoiding re-acquiring the lock at the end of the
         * configuration. On error we'll mark it back as free.
         */
        if ((queue > 0) && !shared_queue)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

        spin_unlock_bh(&mvm->queue_info_lock);

        /* This shouldn't happen - out of queues */
        if (WARN_ON(queue <= 0)) {
                IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
                        tid, cfg.sta_id);
                return queue;
        }

        /*
         * Actual en/disablement of aggregations is through the ADD_STA HCMD,
         * but for configuring the SCD to send A-MPDUs we need to mark the queue
         * as aggregatable.
         * Mark all DATA queues as allowing to be aggregated at some point
         */
        cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                         queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

        /*
         * If this queue was previously inactive (idle) - we need to free it
         * first
         */
        if (using_inactive_queue) {
                struct iwl_scd_txq_cfg_cmd cmd = {
                        .scd_queue = queue,
                        .action = SCD_CFG_DISABLE_QUEUE,
                };
                u8 txq_curr_ac;

                disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);

                spin_lock_bh(&mvm->queue_info_lock);
                txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
                cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
                cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
                cmd.tid = mvm->queue_info[queue].txq_tid;
                spin_unlock_bh(&mvm->queue_info_lock);

                /* Disable the queue */
                if (disable_agg_tids)
                        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                                     disable_agg_tids, false);
                iwl_trans_txq_disable(mvm->trans, queue, false);
                ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
                                           &cmd);
                if (ret) {
                        IWL_ERR(mvm,
                                "Failed to free inactive queue %d (ret=%d)\n",
                                queue, ret);

                        /* Re-mark the inactive queue as inactive */
                        spin_lock_bh(&mvm->queue_info_lock);
                        mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
                        spin_unlock_bh(&mvm->queue_info_lock);

                        return ret;
                }

                /* If TXQ is allocated to another STA, update removal in FW */
                if (cmd.sta_id != mvmsta->sta_id)
                        iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
        }

        IWL_DEBUG_TX_QUEUES(mvm,
                            "Allocating %squeue #%d to sta %d on tid %d\n",
                            shared_queue ? "shared " : "", queue,
                            mvmsta->sta_id, tid);

        if (shared_queue) {
                /* Disable any open aggs on this queue */
                disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

                if (disable_agg_tids) {
                        IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
                                            queue);
                        iwl_mvm_invalidate_sta_queue(mvm, queue,
                                                     disable_agg_tids, false);
                }
        }

        ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
        iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
                           wdg_timeout);

        /*
         * Mark the queue as shared in the transport if it is shared.
         * Note this has to be done after queue enablement because enablement
         * can also set this value, and there is no indication of shared
         * queues there.
         */
        if (shared_queue)
                iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
        mvmsta->tfd_queue_msk |= BIT(queue);
        queue_state = mvmsta->tid_data[tid].state;

        if (mvmsta->reserved_queue == queue)
                mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
        spin_unlock_bh(&mvmsta->lock);

        if (!shared_queue) {
                ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
                if (ret)
                        goto out_err;

                /* If we need to re-enable aggregations... */
                if (queue_state == IWL_AGG_ON) {
                        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
                        if (ret)
                                goto out_err;
                }
        } else {
                /* Redirect queue, if needed */
                ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
                                                 wdg_timeout, false);
                if (ret)
                        goto out_err;
        }

        return 0;

out_err:
        iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

        return ret;
}

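/*
 * Reassign the TID "owner" of a shared queue: if the owning TID was removed
 * from the queue, pick any TID still mapped to it and update the SCD
 * configuration, so that a new queue can later be allocated for the old
 * owner.
 */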
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
                .action = SCD_CFG_UPDATE_QUEUE_TID,
        };
        s8 sta_id;
        int tid;
        unsigned long tid_bitmap;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
                return;

        /* Find any TID for queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        cmd.tid = tid;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

        ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
                        queue, ret);
        else
                IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
                                    queue, tid);
}

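/*
 * If only a single TID is left on a shared queue, redirect the queue back to
 * that TID's AC, re-enable aggregation on it if needed, and mark the queue
 * as non-shared again.
 */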
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        s8 sta_id;
        int tid = -1;
        unsigned long tid_bitmap;
        unsigned int wdg_timeout;
        int ssn;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        spin_lock_bh(&mvm->queue_info_lock);
        sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);

        /* Find TID for queue, and make sure it is the only one on the queue */
        tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
        if (tid_bitmap != BIT(tid)) {
                IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
                        queue, tid_bitmap);
                return;
        }

        IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
                            tid);

        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));

        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
                return;

        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

        ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

        ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
                                         tid_to_mac80211_ac[tid], ssn,
                                         wdg_timeout, true);
        if (ret) {
                IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
                return;
        }

        /* If aggs should be turned back on - do it */
        if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
                struct iwl_mvm_add_sta_cmd cmd = {0};

                mvmsta->tid_disable_agg &= ~BIT(tid);

                cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
                cmd.sta_id = mvmsta->sta_id;
                cmd.add_modify = STA_MODE_MODIFY;
                cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
                cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
                cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                           iwl_mvm_add_sta_cmd_size(mvm), &cmd);
                if (!ret) {
                        IWL_DEBUG_TX_QUEUES(mvm,
                                            "TXQ #%d is now aggregated again\n",
                                            queue);

                        /* Mark queue internally as aggregating again */
                        iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
                }
        }

        spin_lock_bh(&mvm->queue_info_lock);
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
        spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
        if (tid == IWL_MAX_TID_COUNT)
                return IEEE80211_AC_VO; /* MGMT */

        return tid_to_mac80211_ac[tid];
}

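/*
 * Flush the TX frames that were deferred for @tid while it had no allocated
 * queue: allocate a queue if needed, then splice the deferred frames out
 * under the station lock and send (or free) them.
 */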
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta, int tid)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head deferred_tx;
        u8 mac_queue;
        bool no_queue = false; /* Marks if there is a problem with the queue */
        u8 ac;

        lockdep_assert_held(&mvm->mutex);

        skb = skb_peek(&tid_data->deferred_tx_frames);
        if (!skb)
                return;
        hdr = (void *)skb->data;

        ac = iwl_mvm_tid_to_ac_queue(tid);
        mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

        if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
            iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
                IWL_ERR(mvm,
                        "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
                        mvmsta->sta_id, tid);

                /*
                 * Mark queue as problematic so later the deferred traffic is
                 * freed, as we can do nothing with it
                 */
                no_queue = true;
        }

        __skb_queue_head_init(&deferred_tx);

        /* Disable bottom-halves when entering TX path */
        local_bh_disable();
        spin_lock(&mvmsta->lock);
        skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
        spin_unlock(&mvmsta->lock);

        while ((skb = __skb_dequeue(&deferred_tx)))
                if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
                        ieee80211_free_txskb(mvm->hw, skb);
        local_bh_enable();

        /* Wake queue */
        iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
                                           add_stream_wk);
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long deferred_tid_traffic;
        int queue, sta_id, tid;

        /* Check inactivity of queues */
        iwl_mvm_inactivity_check(mvm);

        mutex_lock(&mvm->mutex);

        /* Reconfigure queues requiring reconfiguration */
        for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
                bool reconfig;
                bool change_owner;

                spin_lock_bh(&mvm->queue_info_lock);
                reconfig = (mvm->queue_info[queue].status ==
                            IWL_MVM_QUEUE_RECONFIGURING);

                /*
                 * We need to take into account a situation in which a TXQ was
                 * allocated to TID x, and then turned shared by adding TIDs y
                 * and z. If TID x becomes inactive and is removed from the TXQ,
                 * ownership must be given to one of the remaining TIDs.
                 * This is mainly because if TID x continues - a new queue can't
                 * be allocated for it as long as it is an owner of another TXQ.
                 */
                change_owner = !(mvm->queue_info[queue].tid_bitmap &
                                 BIT(mvm->queue_info[queue].txq_tid)) &&
                               (mvm->queue_info[queue].status ==
                                IWL_MVM_QUEUE_SHARED);
                spin_unlock_bh(&mvm->queue_info_lock);

                if (reconfig)
                        iwl_mvm_unshare_queue(mvm, queue);
                else if (change_owner)
                        iwl_mvm_change_queue_owner(mvm, queue);
        }

        /* Go over all stations with deferred traffic */
        for_each_set_bit(sta_id, mvm->sta_deferred_frames,
                         IWL_MVM_STATION_COUNT) {
                clear_bit(sta_id, mvm->sta_deferred_frames);
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
                if (IS_ERR_OR_NULL(sta))
                        continue;

                mvmsta = iwl_mvm_sta_from_mac80211(sta);
                deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

                for_each_set_bit(tid, &deferred_tid_traffic,
                                 IWL_MAX_TID_COUNT + 1)
                        iwl_mvm_tx_deferred_stream(mvm, sta, tid);
        }

        mutex_unlock(&mvm->mutex);
}

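/*
 * Reserve a DATA queue for a new station in DQA mode. For a non-TDLS client
 * station, the dedicated BSS client queue is preferred when it is free.
 */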
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
                                      struct ieee80211_sta *sta,
                                      enum nl80211_iftype vif_type)
{
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        int queue;

        /*
         * Check for inactive queues, so we don't reach a situation where we
         * can't add a STA due to a shortage in queues that doesn't really exist
         */
        iwl_mvm_inactivity_check(mvm);

        spin_lock_bh(&mvm->queue_info_lock);

        /* Make sure we have free resources for this STA */
        if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
            !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
            (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
             IWL_MVM_QUEUE_FREE))
                queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
        else
                queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                IWL_MVM_DQA_MIN_DATA_QUEUE,
                                                IWL_MVM_DQA_MAX_DATA_QUEUE);
        if (queue < 0) {
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm, "No available queues for new station\n");
                return -ENOSPC;
        }
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

        spin_unlock_bh(&mvm->queue_info_lock);

        mvmsta->reserved_queue = queue;

        IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
                            queue, mvmsta->sta_id);

        return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                                 struct iwl_mvm_sta *mvm_sta)
{
        unsigned int wdg_timeout =
                        iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
        int i;
        struct iwl_trans_txq_scd_cfg cfg = {
                .sta_id = mvm_sta->sta_id,
                .frame_limit = IWL_FRAME_LIMIT,
        };

        /* Make sure reserved queue is still marked as such (or allocated) */
        mvm->queue_info[mvm_sta->reserved_queue].status =
                IWL_MVM_QUEUE_RESERVED;

        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
                int txq_id = tid_data->txq_id;
                int ac;
                u8 mac_queue;

                if (txq_id == IEEE80211_INVAL_HW_QUEUE)
                        continue;

                skb_queue_head_init(&tid_data->deferred_tx_frames);

                ac = tid_to_mac80211_ac[i];
                mac_queue = mvm_sta->vif->hw_queue[ac];

                cfg.tid = i;
                cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
                cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                                 txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Re-mapping sta %d tid %d to queue %d\n",
                                    mvm_sta->sta_id, i, txq_id);

                iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
                                   IEEE80211_SEQ_TO_SN(tid_data->seq_number),
                                   &cfg, wdg_timeout);

                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
        }

        atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}

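/*
 * Add a station: pick a free sta_id (or keep the old one across a HW
 * restart), initialize per-TID state, reserve a DQA queue or allocate TDLS
 * queues as needed, and send the ADD_STA command to the firmware.
 */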
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    struct ieee80211_sta *sta)
{
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_rxq_dup_data *dup_data;
        int i, ret, sta_id;

        lockdep_assert_held(&mvm->mutex);

        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                sta_id = iwl_mvm_find_free_sta_id(mvm,
                                                  ieee80211_vif_type_p2p(vif));
        else
                sta_id = mvm_sta->sta_id;

        if (sta_id == IWL_MVM_STATION_COUNT)
                return -ENOSPC;

        spin_lock_init(&mvm_sta->lock);

        /* In DQA mode, if this is a HW restart, re-alloc existing queues */
        if (iwl_mvm_is_dqa_supported(mvm) &&
            test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
                goto update_fw;
        }

        mvm_sta->sta_id = sta_id;
        mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                      mvmvif->color);
        mvm_sta->vif = vif;
        mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
        mvm_sta->tx_protection = 0;
        mvm_sta->tt_tx_protection = false;

        /* HW restart, don't assume the memory has been zeroed */
        atomic_set(&mvm->pending_frames[sta_id], 0);
        mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
        mvm_sta->tfd_queue_msk = 0;

        /*
         * Allocate new queues for a TDLS station, unless we're in DQA mode,
         * in which case the queues will be allocated dynamically
         */
        if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
                ret = iwl_mvm_tdls_sta_init(mvm, sta);
                if (ret)
                        return ret;
        } else if (!iwl_mvm_is_dqa_supported(mvm)) {
                for (i = 0; i < IEEE80211_NUM_ACS; i++)
                        if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
                                mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
        }

        /* for HW restart - reset everything but the sequence number */
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                u16 seq = mvm_sta->tid_data[i].seq_number;
                memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
                mvm_sta->tid_data[i].seq_number = seq;

                if (!iwl_mvm_is_dqa_supported(mvm))
                        continue;

                /*
                 * Mark all queues for this STA as unallocated and defer TX
                 * frames until the queue is allocated
                 */
                mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
                skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
        }
        mvm_sta->deferred_traffic_tid_map = 0;
        mvm_sta->agg_tids = 0;

        if (iwl_mvm_has_new_rx_api(mvm) &&
            !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                dup_data = kcalloc(mvm->trans->num_rx_queues,
                                   sizeof(*dup_data),
                                   GFP_KERNEL);
                if (!dup_data)
                        return -ENOMEM;
                mvm_sta->dup_data = dup_data;
        }

        if (iwl_mvm_is_dqa_supported(mvm)) {
                ret = iwl_mvm_reserve_sta_stream(mvm, sta,
                                                 ieee80211_vif_type_p2p(vif));
                if (ret)
                        goto err;
        }

update_fw:
        ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
        if (ret)
                goto err;

        if (vif->type == NL80211_IFTYPE_STATION) {
                if (!sta->tdls) {
                        WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
                        mvmvif->ap_sta_id = sta_id;
                } else {
                        WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
                }
        }

        rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

        return 0;

err:
        if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
                iwl_mvm_tdls_sta_deinit(mvm, sta);
        return ret;
}

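/*
 * Ask the firmware to start or stop draining the frames pending for this
 * station, so that the station can later be removed once all of its frames
 * have been flushed.
 */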
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain)
{
        struct iwl_mvm_add_sta_cmd cmd = {};
        int ret;
        u32 status;

        lockdep_assert_held(&mvm->mutex);

        cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
        cmd.sta_id = mvmsta->sta_id;
        cmd.add_modify = STA_MODE_MODIFY;
        cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
        cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

        status = ADD_STA_SUCCESS;
        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
                                          iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;

        switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm,
                               "Frames for staid %d will be drained in fw\n",
                               mvmsta->sta_id);
                break;
        default:
                ret = -EIO;
                IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
                        mvmsta->sta_id);
                break;
        }

        return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity check only).
 */
1342static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1343{
1344        struct ieee80211_sta *sta;
1345        struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1346                .sta_id = sta_id,
1347        };
1348        int ret;
1349
1350        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1351                                        lockdep_is_held(&mvm->mutex));
1352
1353        /* Note: internal stations are marked as error values */
1354        if (!sta) {
1355                IWL_ERR(mvm, "Invalid station id\n");
1356                return -EINVAL;
1357        }
1358
1359        ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1360                                   sizeof(rm_sta_cmd), &rm_sta_cmd);
1361        if (ret) {
1362                IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1363                return ret;
1364        }
1365
1366        return 0;
1367}
1368
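    /*
     * Worker that completes deferred station removals. Entries in
     * mvm->fw_id_to_mac_id[] encode the station state: NULL means the
     * entry is free, a valid pointer means the station is in use,
     * ERR_PTR(-EINVAL) marks an internal station, ERR_PTR(-ENOENT) a
     * station mac80211 already removed, and ERR_PTR(-EBUSY) a station
     * that is being drained and must be removed here once drained.
     */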
1369void iwl_mvm_sta_drained_wk(struct work_struct *wk)
1370{
1371        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
1372        u8 sta_id;
1373
1374        /*
1375         * The mutex is needed because of the SYNC cmd, but not only that: if
1376         * this work ran concurrently with iwl_mvm_rm_sta, it could run before
1377         * iwl_mvm_rm_sta marks the station as busy and then exit early. Then
1378         * iwl_mvm_rm_sta would mark the station as busy, and nobody would
1379         * ever clean it up.
1380         */
1381        mutex_lock(&mvm->mutex);
1382
1383        for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
1384                int ret;
1385                struct ieee80211_sta *sta =
1386                        rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1387                                                  lockdep_is_held(&mvm->mutex));
1388
1389                /*
1390                 * This station is in use or RCU-removed; the latter happens in
1391                 * managed mode, where mac80211 removes the station before we
1392                 * can remove it from firmware (we can only do that after the
1393                 * MAC is marked unassociated), and possibly while the deauth
1394                 * frame to disconnect from the AP is still queued. Then, the
1395                 * station pointer is -ENOENT when the last skb is reclaimed.
1396                 */
1397                if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
1398                        continue;
1399
1400                if (PTR_ERR(sta) == -EINVAL) {
1401                        IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
1402                                sta_id);
1403                        continue;
1404                }
1405
1406                if (!sta) {
1407                        IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
1408                                sta_id);
1409                        continue;
1410                }
1411
1412                WARN_ON(PTR_ERR(sta) != -EBUSY);
1413                /* This station was removed and we waited until it got drained,
1414                 * we can now proceed and remove it.
1415                 */
1416                ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1417                if (ret) {
1418                        IWL_ERR(mvm,
1419                                "Couldn't remove sta %d after it was drained\n",
1420                                sta_id);
1421                        continue;
1422                }
1423                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1424                clear_bit(sta_id, mvm->sta_drained);
1425
1426                if (mvm->tfd_drained[sta_id]) {
1427                        unsigned long i, msk = mvm->tfd_drained[sta_id];
1428
1429                        for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
1430                                iwl_mvm_disable_txq(mvm, i, i,
1431                                                    IWL_MAX_TID_COUNT, 0);
1432
1433                        mvm->tfd_drained[sta_id] = 0;
1434                        IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
1435                                       sta_id, msk);
1436                }
1437        }
1438
1439        mutex_unlock(&mvm->mutex);
1440}
1441
1442static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1443                                       struct ieee80211_vif *vif,
1444                                       struct iwl_mvm_sta *mvm_sta)
1445{
1446        int ac;
1447        int i;
1448
1449        lockdep_assert_held(&mvm->mutex);
1450
1451        for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1452                if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
1453                        continue;
1454
1455                ac = iwl_mvm_tid_to_ac_queue(i);
1456                iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1457                                    vif->hw_queue[ac], i, 0);
1458                mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
1459        }
1460}
1461
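    /*
     * Remove a station: drain and flush its queues, release its DQA
     * queues if applicable, and either remove it from the firmware now
     * or, if frames are still pending for it, mark it as -EBUSY and let
     * iwl_mvm_sta_drained_wk() complete the removal later.
     */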
1462int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1463                   struct ieee80211_vif *vif,
1464                   struct ieee80211_sta *sta)
1465{
1466        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1467        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1468        int ret;
1469
1470        lockdep_assert_held(&mvm->mutex);
1471
1472        if (iwl_mvm_has_new_rx_api(mvm))
1473                kfree(mvm_sta->dup_data);
1474
1475        if ((vif->type == NL80211_IFTYPE_STATION &&
1476             mvmvif->ap_sta_id == mvm_sta->sta_id) ||
1477            iwl_mvm_is_dqa_supported(mvm)) {
1478                ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1479                if (ret)
1480                        return ret;
1481                /* flush its queues here since we are freeing mvm_sta */
1482                ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
1483                if (ret)
1484                        return ret;
1485                ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
1486                                                    mvm_sta->tfd_queue_msk);
1487                if (ret)
1488                        return ret;
1489                ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1490
1491                /* If DQA is supported - the queues can be disabled now */
1492                if (iwl_mvm_is_dqa_supported(mvm)) {
1493                        u8 reserved_txq = mvm_sta->reserved_queue;
1494                        enum iwl_mvm_queue_status *status;
1495
1496                        iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1497
1498                        /*
1499                         * If no traffic has gone through the reserved TXQ - it
1500                         * is still marked as IWL_MVM_QUEUE_RESERVED, and
1501                         * should be manually marked as free again
1502                         */
1503                        spin_lock_bh(&mvm->queue_info_lock);
1504                        status = &mvm->queue_info[reserved_txq].status;
1505                        if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1506                                 (*status != IWL_MVM_QUEUE_FREE),
1507                                 "sta_id %d reserved txq %d status %d",
1508                                 mvm_sta->sta_id, reserved_txq, *status)) {
1509                                spin_unlock_bh(&mvm->queue_info_lock);
1510                                return -EINVAL;
1511                        }
1512
1513                        *status = IWL_MVM_QUEUE_FREE;
1514                        spin_unlock_bh(&mvm->queue_info_lock);
1515                }
1516
1517                if (vif->type == NL80211_IFTYPE_STATION &&
1518                    mvmvif->ap_sta_id == mvm_sta->sta_id) {
1519                        /* if associated - we can't remove the AP STA now */
1520                        if (vif->bss_conf.assoc)
1521                                return ret;
1522
1523                        /* unassoc - go ahead - remove the AP STA now */
1524                        mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1525
1526                        /* clear d0i3_ap_sta_id if no longer relevant */
1527                        if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
1528                                mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1529                }
1530        }
1531
1532        /*
1533         * This shouldn't happen - the TDLS channel switch should be canceled
1534         * before the STA is removed.
1535         */
1536        if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
1537                mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
1538                cancel_delayed_work(&mvm->tdls_cs.dwork);
1539        }
1540
1541        /*
1542         * Make sure that the tx response code sees the station as -EBUSY and
1543         * calls the drain worker.
1544         */
1545        spin_lock_bh(&mvm_sta->lock);
1546        /*
1547         * There are frames pending on the AC queues for this station.
1548         * We need to wait until all the frames are drained...
1549         */
1550        if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
1551                rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
1552                                   ERR_PTR(-EBUSY));
1553                spin_unlock_bh(&mvm_sta->lock);
1554
1555                /* disable TDLS sta queues on drain complete */
1556                if (sta->tdls) {
1557                        mvm->tfd_drained[mvm_sta->sta_id] =
1558                                                        mvm_sta->tfd_queue_msk;
1559                        IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
1560                                       mvm_sta->sta_id);
1561                }
1562
1563                ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1564        } else {
1565                spin_unlock_bh(&mvm_sta->lock);
1566
1567                if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
1568                        iwl_mvm_tdls_sta_deinit(mvm, sta);
1569
1570                ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1571                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1572        }
1573
1574        return ret;
1575}
1576
1577int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1578                      struct ieee80211_vif *vif,
1579                      u8 sta_id)
1580{
1581        int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1582
1583        lockdep_assert_held(&mvm->mutex);
1584
1585        RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1586        return ret;
1587}
1588
1589int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1590                             struct iwl_mvm_int_sta *sta,
1591                             u32 qmask, enum nl80211_iftype iftype)
1592{
1593        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1594                sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
1595                if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
1596                        return -ENOSPC;
1597        }
1598
1599        sta->tfd_queue_msk = qmask;
1600
1601        /* put a non-NULL value so iterating over the stations won't stop */
1602        rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1603        return 0;
1604}
1605
1606static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
1607                                    struct iwl_mvm_int_sta *sta)
1608{
1609        RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1610        memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1611        sta->sta_id = IWL_MVM_STATION_COUNT;
1612}
1613
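    /*
     * Send the ADD_STA command for an internal (driver-only) station.
     * TX is disabled on all QoS TIDs (0xffff mask): internal stations
     * transmit only on the non-QoS TID (IWL_MAX_TID_COUNT). @addr may
     * be NULL, e.g. for the aux station, which has no MAC address.
     */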
1614static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1615                                      struct iwl_mvm_int_sta *sta,
1616                                      const u8 *addr,
1617                                      u16 mac_id, u16 color)
1618{
1619        struct iwl_mvm_add_sta_cmd cmd;
1620        int ret;
1621        u32 status;
1622
1623        lockdep_assert_held(&mvm->mutex);
1624
1625        memset(&cmd, 0, sizeof(cmd));
1626        cmd.sta_id = sta->sta_id;
1627        cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1628                                                             color));
1629
1630        cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1631        cmd.tid_disable_tx = cpu_to_le16(0xffff);
1632
1633        if (addr)
1634                memcpy(cmd.addr, addr, ETH_ALEN);
1635
1636        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1637                                          iwl_mvm_add_sta_cmd_size(mvm),
1638                                          &cmd, &status);
1639        if (ret)
1640                return ret;
1641
1642        switch (status & IWL_ADD_STA_STATUS_MASK) {
1643        case ADD_STA_SUCCESS:
1644                IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1645                return 0;
1646        default:
1647                ret = -EIO;
1648                IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1649                        status);
1650                break;
1651        }
1652        return ret;
1653}
1654
1655int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1656{
1657        unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1658                                        mvm->cfg->base_params->wd_timeout :
1659                                        IWL_WATCHDOG_DISABLED;
1660        int ret;
1661
1662        lockdep_assert_held(&mvm->mutex);
1663
1664        /* Map Aux queue to fifo - needs to happen before adding Aux station */
1665        if (!iwl_mvm_is_dqa_supported(mvm))
1666                iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
1667                                      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
1668
1669        /* Allocate aux station and assign to it the aux queue */
1670        ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
1671                                       NL80211_IFTYPE_UNSPECIFIED);
1672        if (ret)
1673                return ret;
1674
1675        if (iwl_mvm_is_dqa_supported(mvm)) {
1676                struct iwl_trans_txq_scd_cfg cfg = {
1677                        .fifo = IWL_MVM_TX_FIFO_MCAST,
1678                        .sta_id = mvm->aux_sta.sta_id,
1679                        .tid = IWL_MAX_TID_COUNT,
1680                        .aggregate = false,
1681                        .frame_limit = IWL_FRAME_LIMIT,
1682                };
1683
1684                iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
1685                                   wdg_timeout);
1686        }
1687
1688        ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1689                                         MAC_INDEX_AUX, 0);
1690
1691        if (ret)
1692                iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1693        return ret;
1694}
1695
1696int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1697{
1698        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1699
1700        lockdep_assert_held(&mvm->mutex);
1701        return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1702                                         mvmvif->id, 0);
1703}
1704
1705int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1706{
1707        int ret;
1708
1709        lockdep_assert_held(&mvm->mutex);
1710
1711        ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1712        if (ret)
1713                IWL_WARN(mvm, "Failed sending remove station\n");
1714
1715        return ret;
1716}
1717
1718void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1719{
1720        iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1721}
1722
1723void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1724{
1725        lockdep_assert_held(&mvm->mutex);
1726
1727        iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1728}
1729
1730/*
1731 * Send the add station command for the vif's broadcast station.
1732 * Assumes that the station was already allocated.
1733 *
1734 * @mvm: the mvm component
1735 * @vif: the interface to which the broadcast station is added
1737 */
1738int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1739{
1740        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1741        struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1742        static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1743        const u8 *baddr = _baddr;
1744
1745        lockdep_assert_held(&mvm->mutex);
1746
1747        if (iwl_mvm_is_dqa_supported(mvm)) {
1748                struct iwl_trans_txq_scd_cfg cfg = {
1749                        .fifo = IWL_MVM_TX_FIFO_VO,
1750                        .sta_id = mvmvif->bcast_sta.sta_id,
1751                        .tid = IWL_MAX_TID_COUNT,
1752                        .aggregate = false,
1753                        .frame_limit = IWL_FRAME_LIMIT,
1754                };
1755                unsigned int wdg_timeout =
1756                        iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1757                int queue;
1758
1759                if ((vif->type == NL80211_IFTYPE_AP) &&
1760                    (mvmvif->bcast_sta.tfd_queue_msk &
1761                     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
1762                        queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1763                else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
1764                         (mvmvif->bcast_sta.tfd_queue_msk &
1765                          BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
1766                        queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
1767                else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
1768                        return -EINVAL;
1769
1770                iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
1771                                   wdg_timeout);
1772        }
1773
1774        if (vif->type == NL80211_IFTYPE_ADHOC)
1775                baddr = vif->bss_conf.bssid;
1776
1777        if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
1778                return -ENOSPC;
1779
1780        return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1781                                          mvmvif->id, mvmvif->color);
1782}
1783
1784/* Send the FW a request to remove the station from its internal data
1785 * structures, but DO NOT remove the entry from the local data structures. */
1786int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1787{
1788        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1789        int ret;
1790
1791        lockdep_assert_held(&mvm->mutex);
1792
1793        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
1794        if (ret)
1795                IWL_WARN(mvm, "Failed sending remove station\n");
1796        return ret;
1797}
1798
1799int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1800{
1801        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1802        u32 qmask = 0;
1803
1804        lockdep_assert_held(&mvm->mutex);
1805
1806        if (!iwl_mvm_is_dqa_supported(mvm))
1807                qmask = iwl_mvm_mac_get_queues_mask(vif);
1808
1809        if (vif->type == NL80211_IFTYPE_AP) {
1810                /*
1811                 * The firmware defines the TFD queue mask to only be relevant
1812                 * for *unicast* queues, so the multicast (CAB) queue shouldn't
1813                 * be included.
1814                 */
1815                qmask &= ~BIT(vif->cab_queue);
1816
1817                if (iwl_mvm_is_dqa_supported(mvm))
1818                        qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
1819        } else if (iwl_mvm_is_dqa_supported(mvm) &&
1820                   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1821                qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
1822        }
1823
1824        return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
1825                                        ieee80211_vif_type_p2p(vif));
1826}
1827
1828/* Allocate a new station entry for the broadcast station to the given vif,
1829 * and send it to the FW.
1830 * Note that each P2P mac should have its own broadcast station.
1831 *
1832 * @mvm: the mvm component
1833 * @vif: the interface to which the broadcast station is added
1834 */
1835int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1836{
1837        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1838        struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1839        int ret;
1840
1841        lockdep_assert_held(&mvm->mutex);
1842
1843        ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1844        if (ret)
1845                return ret;
1846
1847        ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
1848
1849        if (ret)
1850                iwl_mvm_dealloc_int_sta(mvm, bsta);
1851
1852        return ret;
1853}
1854
1855void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1856{
1857        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1858
1859        iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
1860}
1861
1862/*
1863 * Send the FW a request to remove the station from its internal data
1864 * structures, and in addition remove it from the local data structures.
1865 */
1866int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1867{
1868        int ret;
1869
1870        lockdep_assert_held(&mvm->mutex);
1871
1872        ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
1873
1874        iwl_mvm_dealloc_bcast_sta(mvm, vif);
1875
1876        return ret;
1877}
1878
1879#define IWL_MAX_RX_BA_SESSIONS 16
1880
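    /* Synchronously notify all RX queues that a BA session was deleted */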
1881static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
1882{
1883        struct iwl_mvm_delba_notif notif = {
1884                .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
1885                .metadata.sync = 1,
1886                .delba.baid = baid,
1887        };
1888        iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
1889}
1890
1891static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
1892                                 struct iwl_mvm_baid_data *data)
1893{
1894        int i;
1895
1896        iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
1897
1898        for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1899                int j;
1900                struct iwl_mvm_reorder_buffer *reorder_buf =
1901                        &data->reorder_buf[i];
1902
1903                spin_lock_bh(&reorder_buf->lock);
1904                if (likely(!reorder_buf->num_stored)) {
1905                        spin_unlock_bh(&reorder_buf->lock);
1906                        continue;
1907                }
1908
1909                /*
1910                 * This shouldn't happen in regular DELBA since the internal
1911                 * delBA notification should trigger a release of all frames in
1912                 * the reorder buffer.
1913                 */
1914                WARN_ON(1);
1915
1916                for (j = 0; j < reorder_buf->buf_size; j++)
1917                        __skb_queue_purge(&reorder_buf->entries[j]);
1918                /*
1919                 * Prevent timer re-arm. This prevents a very far fetched case
1920                 * where we timed out on the notification. There may be prior
1921                 * RX frames pending in the RX queue before the notification
1922                 * that might get processed between now and the actual deletion
1923                 * and we would re-arm the timer although we are deleting the
1924                 * reorder buffer.
1925                 */
1926                reorder_buf->removed = true;
1927                spin_unlock_bh(&reorder_buf->lock);
1928                del_timer_sync(&reorder_buf->reorder_timer);
1929        }
1930}
1931
1932static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
1933                                        u32 sta_id,
1934                                        struct iwl_mvm_baid_data *data,
1935                                        u16 ssn, u8 buf_size)
1936{
1937        int i;
1938
1939        for (i = 0; i < mvm->trans->num_rx_queues; i++) {
1940                struct iwl_mvm_reorder_buffer *reorder_buf =
1941                        &data->reorder_buf[i];
1942                int j;
1943
1944                reorder_buf->num_stored = 0;
1945                reorder_buf->head_sn = ssn;
1946                reorder_buf->buf_size = buf_size;
1947                /* rx reorder timer */
1948                reorder_buf->reorder_timer.function =
1949                        iwl_mvm_reorder_timer_expired;
1950                reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
1951                init_timer(&reorder_buf->reorder_timer);
1952                spin_lock_init(&reorder_buf->lock);
1953                reorder_buf->mvm = mvm;
1954                reorder_buf->queue = i;
1955                reorder_buf->sta_id = sta_id;
1956                for (j = 0; j < reorder_buf->buf_size; j++)
1957                        __skb_queue_head_init(&reorder_buf->entries[j]);
1958        }
1959}
1960
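    /*
     * Start or stop an RX BA session for a station/TID. With the new RX
     * API the firmware returns a BAID in the ADD_STA status; the per-queue
     * reorder buffers and the session timer hang off mvm->baid_map[baid],
     * which is RCU-protected so the RX path can look it up locklessly.
     */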
1961int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1962                       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
1963{
1964        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1965        struct iwl_mvm_add_sta_cmd cmd = {};
1966        struct iwl_mvm_baid_data *baid_data = NULL;
1967        int ret;
1968        u32 status;
1969
1970        lockdep_assert_held(&mvm->mutex);
1971
1972        if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
1973                IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
1974                return -ENOSPC;
1975        }
1976
1977        if (iwl_mvm_has_new_rx_api(mvm) && start) {
1978                /*
1979                 * Allocate here so if allocation fails we can bail out early
1980                 * before starting the BA session in the firmware
1981                 */
1982                baid_data = kzalloc(sizeof(*baid_data) +
1983                                    mvm->trans->num_rx_queues *
1984                                    sizeof(baid_data->reorder_buf[0]),
1985                                    GFP_KERNEL);
1986                if (!baid_data)
1987                        return -ENOMEM;
1988        }
1989
1990        cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
1991        cmd.sta_id = mvm_sta->sta_id;
1992        cmd.add_modify = STA_MODE_MODIFY;
1993        if (start) {
1994                cmd.add_immediate_ba_tid = (u8) tid;
1995                cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
1996                cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
1997        } else {
1998                cmd.remove_immediate_ba_tid = (u8) tid;
1999        }
2000        cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2001                                  STA_MODIFY_REMOVE_BA_TID;
2002
2003        status = ADD_STA_SUCCESS;
2004        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2005                                          iwl_mvm_add_sta_cmd_size(mvm),
2006                                          &cmd, &status);
2007        if (ret)
2008                goto out_free;
2009
2010        switch (status & IWL_ADD_STA_STATUS_MASK) {
2011        case ADD_STA_SUCCESS:
2012                IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2013                             start ? "start" : "stopp");
2014                break;
2015        case ADD_STA_IMMEDIATE_BA_FAILURE:
2016                IWL_WARN(mvm, "RX BA Session refused by fw\n");
2017                ret = -ENOSPC;
2018                break;
2019        default:
2020                ret = -EIO;
2021                IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2022                        start ? "start" : "stopp", status);
2023                break;
2024        }
2025
2026        if (ret)
2027                goto out_free;
2028
2029        if (start) {
2030                u8 baid;
2031
2032                mvm->rx_ba_sessions++;
2033
2034                if (!iwl_mvm_has_new_rx_api(mvm))
2035                        return 0;
2036
2037                if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2038                        ret = -EINVAL;
2039                        goto out_free;
2040                }
2041                baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2042                            IWL_ADD_STA_BAID_SHIFT);
2043                baid_data->baid = baid;
2044                baid_data->timeout = timeout;
2045                baid_data->last_rx = jiffies;
2046                setup_timer(&baid_data->session_timer,
2047                            iwl_mvm_rx_agg_session_expired,
2048                            (unsigned long)&mvm->baid_map[baid]);
2049                baid_data->mvm = mvm;
2050                baid_data->tid = tid;
2051                baid_data->sta_id = mvm_sta->sta_id;
2052
2053                mvm_sta->tid_to_baid[tid] = baid;
2054                if (timeout)
2055                        mod_timer(&baid_data->session_timer,
2056                                  TU_TO_EXP_TIME(timeout * 2));
2057
2058                iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2059                                            baid_data, ssn, buf_size);
2060                /*
2061                 * protect the BA data with RCU to cover a case where our
2062                 * internal RX sync mechanism will timeout (not that it's
2063                 * supposed to happen) and we will free the session data while
2064                 * RX is being processed in parallel
2065                 */
2066                IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2067                             mvm_sta->sta_id, tid, baid);
2068                WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2069                rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2070        } else {
2071                u8 baid = mvm_sta->tid_to_baid[tid];
2072
2073                if (mvm->rx_ba_sessions > 0)
2074                        /* check that restart flow didn't zero the counter */
2075                        mvm->rx_ba_sessions--;
2076                if (!iwl_mvm_has_new_rx_api(mvm))
2077                        return 0;
2078
2079                if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2080                        return -EINVAL;
2081
2082                baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2083                if (WARN_ON(!baid_data))
2084                        return -EINVAL;
2085
2086                /* synchronize all rx queues so we can safely delete */
2087                iwl_mvm_free_reorder(mvm, baid_data);
2088                del_timer_sync(&baid_data->session_timer);
2089                RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2090                kfree_rcu(baid_data, rcu_head);
2091                IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2092        }
2093        return 0;
2094
2095out_free:
2096        kfree(baid_data);
2097        return ret;
2098}
2099
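    /*
     * Enable or disable TX aggregation on a TID via ADD_STA. This also
     * updates the station's TFD queue mask; in DQA mode the queue itself
     * is kept when aggregation stops.
     */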
2100int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2101                       int tid, u8 queue, bool start)
2102{
2103        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2104        struct iwl_mvm_add_sta_cmd cmd = {};
2105        int ret;
2106        u32 status;
2107
2108        lockdep_assert_held(&mvm->mutex);
2109
2110        if (start) {
2111                mvm_sta->tfd_queue_msk |= BIT(queue);
2112                mvm_sta->tid_disable_agg &= ~BIT(tid);
2113        } else {
2114                /* In DQA-mode the queue isn't removed on agg termination */
2115                if (!iwl_mvm_is_dqa_supported(mvm))
2116                        mvm_sta->tfd_queue_msk &= ~BIT(queue);
2117                mvm_sta->tid_disable_agg |= BIT(tid);
2118        }
2119
2120        cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2121        cmd.sta_id = mvm_sta->sta_id;
2122        cmd.add_modify = STA_MODE_MODIFY;
2123        cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
2124        cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2125        cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2126
2127        status = ADD_STA_SUCCESS;
2128        ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2129                                          iwl_mvm_add_sta_cmd_size(mvm),
2130                                          &cmd, &status);
2131        if (ret)
2132                return ret;
2133
2134        switch (status & IWL_ADD_STA_STATUS_MASK) {
2135        case ADD_STA_SUCCESS:
2136                break;
2137        default:
2138                ret = -EIO;
2139                IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2140                        start ? "start" : "stopp", status);
2141                break;
2142        }
2143
2144        return ret;
2145}
2146
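    /* Map a TID (802.1D user priority) to its 802.11 access category */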
2147const u8 tid_to_mac80211_ac[] = {
2148        IEEE80211_AC_BE,
2149        IEEE80211_AC_BK,
2150        IEEE80211_AC_BK,
2151        IEEE80211_AC_BE,
2152        IEEE80211_AC_VI,
2153        IEEE80211_AC_VI,
2154        IEEE80211_AC_VO,
2155        IEEE80211_AC_VO,
2156        IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2157};
2158
2159static const u8 tid_to_ucode_ac[] = {
2160        AC_BE,
2161        AC_BK,
2162        AC_BK,
2163        AC_BE,
2164        AC_VI,
2165        AC_VI,
2166        AC_VO,
2167        AC_VO,
2168};
2169
2170int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2171                             struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2172{
2173        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2174        struct iwl_mvm_tid_data *tid_data;
2175        int txq_id;
2176        int ret;
2177
2178        if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2179                return -EINVAL;
2180
2181        if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2182                IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
2183                        mvmsta->tid_data[tid].state);
2184                return -ENXIO;
2185        }
2186
2187        lockdep_assert_held(&mvm->mutex);
2188
2189        spin_lock_bh(&mvmsta->lock);
2190
2191        /* possible race condition - we entered D0i3 while starting agg */
2192        if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2193                spin_unlock_bh(&mvmsta->lock);
2194                IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2195                return -EIO;
2196        }
2197
2198        spin_lock(&mvm->queue_info_lock);
2199
2200        /*
2201         * Note the possible cases:
2202         *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
2203         *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
2204         *      one and mark it as reserved
2205         *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
2206         *      non-DQA mode, since the TXQ hasn't yet been allocated
2207         */
2208        txq_id = mvmsta->tid_data[tid].txq_id;
2209        if (iwl_mvm_is_dqa_supported(mvm) &&
2210            unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
2211                ret = -ENXIO;
2212                IWL_DEBUG_TX_QUEUES(mvm,
2213                                    "Can't start tid %d agg on shared queue!\n",
2214                                    tid);
2215                goto release_locks;
2216        } else if (!iwl_mvm_is_dqa_supported(mvm) ||
2217                   mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
2218                txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2219                                                 mvm->first_agg_queue,
2220                                                 mvm->last_agg_queue);
2221                if (txq_id < 0) {
2222                        ret = txq_id;
2223                        IWL_ERR(mvm, "Failed to allocate agg queue\n");
2224                        goto release_locks;
2225                }
2226
2227                /* TXQ hasn't yet been enabled, so mark it only as reserved */
2228                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2229        }
2230
2231        spin_unlock(&mvm->queue_info_lock);
2232
2233        IWL_DEBUG_TX_QUEUES(mvm,
2234                            "AGG for tid %d will be on queue #%d\n",
2235                            tid, txq_id);
2236
2237        tid_data = &mvmsta->tid_data[tid];
2238        tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2239        tid_data->txq_id = txq_id;
2240        *ssn = tid_data->ssn;
2241
2242        IWL_DEBUG_TX_QUEUES(mvm,
2243                            "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2244                            mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2245                            tid_data->next_reclaimed);
2246
2247        if (tid_data->ssn == tid_data->next_reclaimed) {
2248                tid_data->state = IWL_AGG_STARTING;
2249                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2250        } else {
2251                tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2252        }
2253
2254        ret = 0;
2255        goto out;
2256
2257release_locks:
2258        spin_unlock(&mvm->queue_info_lock);
2259out:
2260        spin_unlock_bh(&mvmsta->lock);
2261
2262        return ret;
2263}
2264
2265int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2266                            struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2267                            bool amsdu)
2268{
2269        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2270        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2271        unsigned int wdg_timeout =
2272                iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2273        int queue, ret;
2274        bool alloc_queue = true;
2275        enum iwl_mvm_queue_status queue_status;
2276        u16 ssn;
2277
2278        struct iwl_trans_txq_scd_cfg cfg = {
2279                .sta_id = mvmsta->sta_id,
2280                .tid = tid,
2281                .frame_limit = buf_size,
2282                .aggregate = true,
2283        };
2284
2285        BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2286                     != IWL_MAX_TID_COUNT);
2287
2288        buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2289
2290        spin_lock_bh(&mvmsta->lock);
2291        ssn = tid_data->ssn;
2292        queue = tid_data->txq_id;
2293        tid_data->state = IWL_AGG_ON;
2294        mvmsta->agg_tids |= BIT(tid);
2295        tid_data->ssn = 0xffff;
2296        tid_data->amsdu_in_ampdu_allowed = amsdu;
2297        spin_unlock_bh(&mvmsta->lock);
2298
2299        cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2300
2301        spin_lock_bh(&mvm->queue_info_lock);
2302        queue_status = mvm->queue_info[queue].status;
2303        spin_unlock_bh(&mvm->queue_info_lock);
2304
2305        /* In DQA mode, the existing queue might need to be reconfigured */
2306        if (iwl_mvm_is_dqa_supported(mvm)) {
2307                /* Maybe there is no need to even alloc a queue... */
2308                if (queue_status == IWL_MVM_QUEUE_READY)
2309                        alloc_queue = false;
2310
2311                /*
2312                 * Only reconfig the SCD for the queue if the window size has
2313                 * changed from current (become smaller)
2314                 */
2315                if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2316                        /*
2317                         * If reconfiguring an existing queue, it first must be
2318                         * drained
2319                         */
2320                        ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
2321                                                            BIT(queue));
2322                        if (ret) {
2323                                IWL_ERR(mvm,
2324                                        "Error draining queue before reconfig\n");
2325                                return ret;
2326                        }
2327
2328                        ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2329                                                   mvmsta->sta_id, tid,
2330                                                   buf_size, ssn);
2331                        if (ret) {
2332                                IWL_ERR(mvm,
2333                                        "Error reconfiguring TXQ #%d\n", queue);
2334                                return ret;
2335                        }
2336                }
2337        }
2338
2339        if (alloc_queue)
2340                iwl_mvm_enable_txq(mvm, queue,
2341                                   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2342                                   &cfg, wdg_timeout);
2343
2344        /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2345        if (queue_status != IWL_MVM_QUEUE_SHARED) {
2346                ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2347                if (ret)
2348                        return -EIO;
2349        }
2350
2351        /* No need to mark as reserved */
2352        spin_lock_bh(&mvm->queue_info_lock);
2353        mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2354        spin_unlock_bh(&mvm->queue_info_lock);
2355
2356        /*
2357         * Even though in theory the peer could have different
2358         * aggregation reorder buffer sizes for different sessions,
2359         * our ucode doesn't allow for that and has a global limit
2360         * for each station. Therefore, use the minimum of all the
2361         * aggregation sessions and our default value.
2362         */
2363        mvmsta->max_agg_bufsize =
2364                min(mvmsta->max_agg_bufsize, buf_size);
2365        mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2366
2367        IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2368                     sta->addr, tid);
2369
2370        return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
2371}
2372
2373int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2374                            struct ieee80211_sta *sta, u16 tid)
2375{
2376        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2377        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2378        u16 txq_id;
2379        int err;
2380
2381        /*
2382         * If mac80211 is cleaning its state, then say that we finished since
2383         * our state has been cleared anyway.
2384         */
2385        if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2386                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2387                return 0;
2388        }
2389
2390        spin_lock_bh(&mvmsta->lock);
2391
2392        txq_id = tid_data->txq_id;
2393
2394        IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2395                            mvmsta->sta_id, tid, txq_id, tid_data->state);
2396
2397        mvmsta->agg_tids &= ~BIT(tid);
2398
2399        spin_lock_bh(&mvm->queue_info_lock);
2400        /*
2401         * The TXQ is marked as reserved only if no traffic came through yet.
2402         * This means no traffic has been sent on this TID (agg'd or not), so
2403         * we no longer have use for the queue. Since it hasn't even been
2404         * allocated through iwl_mvm_enable_txq, we can just mark it back as
2405         * free.
2406         */
2407        if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2408                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2409
2410        spin_unlock_bh(&mvm->queue_info_lock);
2411
2412        switch (tid_data->state) {
2413        case IWL_AGG_ON:
2414                tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2415
2416                IWL_DEBUG_TX_QUEUES(mvm,
2417                                    "ssn = %d, next_recl = %d\n",
2418                                    tid_data->ssn, tid_data->next_reclaimed);
2419
2420                /* There are still packets for this RA / TID in the HW */
2421                if (tid_data->ssn != tid_data->next_reclaimed) {
2422                        tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
2423                        err = 0;
2424                        break;
2425                }
2426
2427                tid_data->ssn = 0xffff;
2428                tid_data->state = IWL_AGG_OFF;
2429                spin_unlock_bh(&mvmsta->lock);
2430
2431                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2432
2433                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2434
2435                if (!iwl_mvm_is_dqa_supported(mvm)) {
2436                        int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2437
2438                        iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
2439                }
2440                return 0;
2441        case IWL_AGG_STARTING:
2442        case IWL_EMPTYING_HW_QUEUE_ADDBA:
2443                /*
2444                 * The agg session has been stopped before it was set up. This
2445                 * can happen when the AddBA timer times out for example.
2446                 */
2447
2448                /* No barriers since we are under mutex */
2449                lockdep_assert_held(&mvm->mutex);
2450
2451                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2452                tid_data->state = IWL_AGG_OFF;
2453                err = 0;
2454                break;
2455        default:
2456                IWL_ERR(mvm,
2457                        "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2458                        mvmsta->sta_id, tid, tid_data->state);
2459                IWL_ERR(mvm,
2460                        "\ttid_data->txq_id = %d\n", tid_data->txq_id);
2461                err = -EINVAL;
2462        }
2463
2464        spin_unlock_bh(&mvmsta->lock);
2465
2466        return err;
2467}
2468
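    /*
     * Unlike iwl_mvm_sta_tx_agg_stop(), this doesn't wait for the session
     * to empty out gracefully: frames still on the AGG queue are drained
     * and flushed.
     */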
2469int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2470                             struct ieee80211_sta *sta, u16 tid)
2471{
2472        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2473        struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2474        u16 txq_id;
2475        enum iwl_mvm_agg_state old_state;
2476
2477        /*
2478         * First set the agg state to OFF to avoid calling
2479         * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2480         */
2481        spin_lock_bh(&mvmsta->lock);
2482        txq_id = tid_data->txq_id;
2483        IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2484                            mvmsta->sta_id, tid, txq_id, tid_data->state);
2485        old_state = tid_data->state;
2486        tid_data->state = IWL_AGG_OFF;
2487        mvmsta->agg_tids &= ~BIT(tid);
2488        spin_unlock_bh(&mvmsta->lock);
2489
2490        spin_lock_bh(&mvm->queue_info_lock);
2491        /*
2492         * The TXQ is marked as reserved only if no traffic came through yet.
2493         * This means no traffic has been sent on this TID (agg'd or not), so
2494         * we no longer have use for the queue. Since it hasn't even been
2495         * allocated through iwl_mvm_enable_txq, we can just mark it back as
2496         * free.
2497         */
2498        if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
2499                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2500        spin_unlock_bh(&mvm->queue_info_lock);
2501
2502        if (old_state >= IWL_AGG_ON) {
2503                iwl_mvm_drain_sta(mvm, mvmsta, true);
2504                if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
2505                        IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
2506                iwl_trans_wait_tx_queue_empty(mvm->trans,
2507                                              mvmsta->tfd_queue_msk);
2508                iwl_mvm_drain_sta(mvm, mvmsta, false);
2509
2510                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2511
2512                if (!iwl_mvm_is_dqa_supported(mvm)) {
2513                        int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
2514
2515                        iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
2516                                            tid, 0);
2517                }
2518        }
2519
2520        return 0;
2521}
2522
2523static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2524{
2525        int i, max = -1, max_offs = -1;
2526
2527        lockdep_assert_held(&mvm->mutex);
2528
2529        /* Pick the unused key offset with the highest 'deleted'
2530         * counter. Every time a key is deleted, all the counters
2531         * are incremented and the one that was just deleted is
2532         * reset to zero. Thus, the highest counter is the one
2533         * that was deleted longest ago. Pick that one.
2534         */
2535        for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2536                if (test_bit(i, mvm->fw_key_table))
2537                        continue;
2538                if (mvm->fw_key_deleted[i] > max) {
2539                        max = mvm->fw_key_deleted[i];
2540                        max_offs = i;
2541                }
2542        }
2543
2544        if (max_offs < 0)
2545                return STA_KEY_IDX_INVALID;
2546
2547        return max_offs;
2548}
2549
2550static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2551                                               struct ieee80211_vif *vif,
2552                                               struct ieee80211_sta *sta)
2553{
2554        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2555
2556        if (sta)
2557                return iwl_mvm_sta_from_mac80211(sta);
2558
2559        /*
2560         * The device expects GTKs for station interfaces to be
2561         * installed as GTKs for the AP station. If we have no
2562         * station ID, then use AP's station ID.
2563         */
2564        if (vif->type == NL80211_IFTYPE_STATION &&
2565            mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2566                u8 sta_id = mvmvif->ap_sta_id;
2567
2568                sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2569                                            lockdep_is_held(&mvm->mutex));
2570
2571                /*
2572                 * It is possible that the 'sta' parameter is NULL,
2573                 * for example when a GTK is removed - the sta_id will then
2574                 * be the AP ID, and no station was passed by mac80211.
2575                 */
2576                if (IS_ERR_OR_NULL(sta))
2577                        return NULL;
2578
2579                return iwl_mvm_sta_from_mac80211(sta);
2580        }
2581
2582        return NULL;
2583}
2584
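    /*
     * Build and send an ADD_STA_KEY command. The cipher selects the key
     * flags and how the key material is laid out in cmd.key: WEP keys
     * are stored at a 3-byte offset, and TKIP additionally carries the
     * phase-1 RX key (TTAK) derived by mac80211.
     */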
2585static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
2586                                struct iwl_mvm_sta *mvm_sta,
2587                                struct ieee80211_key_conf *keyconf, bool mcast,
2588                                u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
2589                                u8 key_offset)
2590{
2591        struct iwl_mvm_add_sta_key_cmd cmd = {};
2592        __le16 key_flags;
2593        int ret;
2594        u32 status;
2595        u16 keyidx;
2596        int i;
2597        u8 sta_id = mvm_sta->sta_id;
2598
2599        keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2600                 STA_KEY_FLG_KEYID_MSK;
2601        key_flags = cpu_to_le16(keyidx);
2602        key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
2603
2604        switch (keyconf->cipher) {
2605        case WLAN_CIPHER_SUITE_TKIP:
2606                key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
2607                cmd.tkip_rx_tsc_byte2 = tkip_iv32;
2608                for (i = 0; i < 5; i++)
2609                        cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
2610                memcpy(cmd.key, keyconf->key, keyconf->keylen);
2611                break;
2612        case WLAN_CIPHER_SUITE_CCMP:
2613                key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
2614                memcpy(cmd.key, keyconf->key, keyconf->keylen);
2615                break;
2616        case WLAN_CIPHER_SUITE_WEP104:
2617                key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
2618                /* fall through */
2619        case WLAN_CIPHER_SUITE_WEP40:
2620                key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
2621                memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
2622                break;
2623        case WLAN_CIPHER_SUITE_GCMP_256:
2624                key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
2625                /* fall through */
2626        case WLAN_CIPHER_SUITE_GCMP:
2627                key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
2628                memcpy(cmd.key, keyconf->key, keyconf->keylen);
2629                break;
2630        default:
2631                key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
2632                memcpy(cmd.key, keyconf->key, keyconf->keylen);
2633        }
2634
2635        if (mcast)
2636                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2637
2638        cmd.key_offset = key_offset;
2639        cmd.key_flags = key_flags;
2640        cmd.sta_id = sta_id;
2641
2642        status = ADD_STA_SUCCESS;
2643        if (cmd_flags & CMD_ASYNC)
2644                ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
2645                                           sizeof(cmd), &cmd);
2646        else
2647                ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
2648                                                  &cmd, &status);
2649
2650        switch (status) {
2651        case ADD_STA_SUCCESS:
2652                IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
2653                break;
2654        default:
2655                ret = -EIO;
2656                IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
2657                break;
2658        }
2659
2660        return ret;
2661}
2662
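    /*
     * Install or remove the IGTK for a station. The receive PN from
     * mac80211 is big-endian (pn[0] is the MSB), so it is repacked
     * below into the little-endian counter the firmware expects.
     */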
2663static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
2664                                 struct ieee80211_key_conf *keyconf,
2665                                 u8 sta_id, bool remove_key)
2666{
2667        struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
2668
2669        /* verify the key details match the required command's expectations */
2670        if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
2671                    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
2672                    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
2673                     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
2674                     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
2675                return -EINVAL;
2676
2677        if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
2678                    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
2679                return -EINVAL;
2680
2681        igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
2682        igtk_cmd.sta_id = cpu_to_le32(sta_id);
2683
2684        if (remove_key) {
2685                igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
2686        } else {
2687                struct ieee80211_key_seq seq;
2688                const u8 *pn;
2689
2690                switch (keyconf->cipher) {
2691                case WLAN_CIPHER_SUITE_AES_CMAC:
2692                        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
2693                        break;
2694                case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2695                case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2696                        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
2697                        break;
2698                default:
2699                        return -EINVAL;
2700                }
2701
2702                memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
2703                if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
2704                        igtk_cmd.ctrl_flags |=
2705                                cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       sizeof(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

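/*
 * Return the MAC address from which the TKIP phase-1 key is derived:
 * the peer station's address if one is given, or the AP station's
 * address on a client interface.  May return NULL if neither exists.
 */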
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

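/*
 * Upload one key instance to the firmware.  TKIP is special: the
 * firmware needs the current iv32 and the phase-1 key derived from
 * the peer's MAC address, both fetched from mac80211 here; all other
 * ciphers are passed through unchanged.
 */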
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		if (!addr) {
			IWL_ERR(mvm, "Failed to find mac address\n");
			return -EINVAL;
		}
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}

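/*
 * Invalidate a key entry in the firmware's key table: the slot is
 * addressed by the hw_key_idx that was assigned when the key was
 * installed, and STA_KEY_NOT_VALID marks it unused.
 */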
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/*
	 * Track which offset was freed most recently: age every entry and
	 * reset this one, so iwl_mvm_set_fw_key_idx() can prefer the
	 * offset that has been unused the longest.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

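/*
 * Called when mac80211 derives a new TKIP phase-1 key (iv32 changed);
 * push it to the firmware.  Runs under RCU rather than the mvm mutex,
 * hence the CMD_ASYNC upload.
 */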
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

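/*
 * Tell the firmware the station is awake again: only the STA_FLG_PS
 * bit is in the modify mask, and it is sent cleared.
 */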
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

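/*
 * Release buffered frames to a sleeping station for a PS-Poll or a
 * uAPSD service period: program how many frames the firmware may send
 * (sleep_tx_count) and whether more data remains buffered.
 */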
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if
	 * all the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(sleep_tx_count == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

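/*
 * Set or clear STA_FLG_DISABLE_TX for one station, which makes the
 * firmware stop or resume transmitting to it.
 */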
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

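/*
 * During a channel switch, block TX to the (temporarily absent) AP
 * station on this interface.
 */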
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}