linux/drivers/net/wireless/iwlegacy/iwl-tx.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
 */
void
iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        u32 reg = 0;
        int txq_id = txq->q.id;

        if (txq->need_update == 0)
                return;

        /* if we're trying to save power */
        if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                /* wake up nic if it's powered down ...
                 * uCode will wake up, and interrupt us again, so next
                 * time we'll skip this part. */
                reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        IWL_DEBUG_INFO(priv,
                                        "Tx queue %d requesting wakeup,"
                                        " GP1 = 0x%x\n", txq_id, reg);
                        iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        return;
                }

                iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
                                txq->q.write_ptr | (txq_id << 8));

                /*
                 * else not in power-save mode,
                 * uCode will never sleep when we're
                 * trying to tx (during RFKILL, we're not trying to tx).
                 */
        } else
                iwl_write32(priv, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));
        txq->need_update = 0;
}
EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
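
/*
 * Usage sketch (illustrative; it mirrors what iwl_legacy_enqueue_hcmd()
 * below actually does): a caller fills a slot, bumps the write index,
 * marks the queue dirty, and then pushes the new index to the hardware.
 *
 *        q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
 *        txq->need_update = 1;
 *        iwl_legacy_txq_update_write_ptr(priv, txq);
 */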

/**
 * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skbs
 */
void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;

        if (q->n_bd == 0)
                return;

        while (q->write_ptr != q->read_ptr) {
                priv->cfg->ops->lib->txq_free_tfd(priv, txq);
                q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}
EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);

/**
 * iwl_legacy_tx_queue_free - Deallocate DMA queue.
 * @txq_id: index of the transmit queue to deallocate
 *
 * Empty the queue by removing and destroying all BDs.
 * Free all buffers.
 * Zero-fill, but do not free, the "txq" descriptor structure.
 */
void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct device *dev = &priv->pci_dev->dev;
        int i;

        iwl_legacy_tx_queue_unmap(priv, txq_id);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_legacy_tx_queue_free);

/**
 * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 */
void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
        bool huge = false;
        int i;

        if (q->n_bd == 0)
                return;

        while (q->read_ptr != q->write_ptr) {
                /* we have no way to tell if it is a huge cmd at this point */
                i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);

                if (txq->meta[i].flags & CMD_SIZE_HUGE)
                        huge = true;
                else
                        pci_unmap_single(priv->pci_dev,
                                         dma_unmap_addr(&txq->meta[i], mapping),
                                         dma_unmap_len(&txq->meta[i], len),
                                         PCI_DMA_BIDIRECTIONAL);

                q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
        }

        if (huge) {
                i = q->n_window;
                pci_unmap_single(priv->pci_dev,
                                 dma_unmap_addr(&txq->meta[i], mapping),
                                 dma_unmap_len(&txq->meta[i], len),
                                 PCI_DMA_BIDIRECTIONAL);
        }
}
EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
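
/*
 * Worked index example (a sketch, assuming the mask-based
 * iwl_legacy_get_cmd_index() in iwl-helpers.h and TFD_CMD_SLOTS == 32;
 * check those definitions before relying on the numbers).  Normal
 * commands map into the n_window slots, while the oversized "huge"
 * command always maps one past the window -- which is why the loop
 * above defers its unmap until the end:
 *
 *        i = iwl_legacy_get_cmd_index(q, 40, 0);   yields 40 & (32 - 1) == 8
 *        i = iwl_legacy_get_cmd_index(q, 40, 1);   yields q->n_window == 32
 */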

/**
 * iwl_legacy_cmd_queue_free - Deallocate the command queue's DMA buffers.
 *
 * Empty the queue by removing and destroying all BDs.
 * Free all buffers.
 * Zero-fill, but do not free, the "txq" descriptor structure.
 */
void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct device *dev = &priv->pci_dev->dev;
        int i;

        iwl_legacy_cmd_queue_unmap(priv);

        /* De-alloc array of command/tx buffers, including the huge slot */
        for (i = 0; i <= TFD_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
                                  txq->tfds, txq->q.dma_addr);

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
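
/*
 * Teardown sketch (illustrative; the per-device code owns the real loop,
 * and hw_params.max_txq_num is an assumption here): data queues use
 * iwl_legacy_tx_queue_free(), while the command queue needs the dedicated
 * variant so its extra huge-command slot is freed as well.
 *
 *        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
 *                if (txq_id == priv->cmd_queue)
 *                        iwl_legacy_cmd_queue_free(priv);
 *                else
 *                        iwl_legacy_tx_queue_free(priv, txq_id);
 *        }
 */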

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  The driver keeps a minimum of 2
 * empty entries in each circular buffer, to protect against confusing empty
 * and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
 * When packets are reclaimed (on the 'tx done' IRQ) and the free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

int iwl_legacy_queue_space(const struct iwl_queue *q)
{
        int s = q->read_ptr - q->write_ptr;

        if (q->read_ptr > q->write_ptr)
                s -= q->n_bd;

        if (s <= 0)
                s += q->n_window;
        /* keep some reserve to not confuse empty and full situations */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
EXPORT_SYMBOL(iwl_legacy_queue_space);
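
/*
 * Worked example (the numbers are illustrative): with n_bd == 256 and
 * n_window == 64, an empty queue (read_ptr == write_ptr) yields s = 0,
 * then s += 64 and s -= 2, so 62 slots are reported free -- the two
 * reserved entries keep a completely full ring from looking identical
 * to an empty one.  With read_ptr == 10 and write_ptr == 200,
 * s = 10 - 200 = -190, then s += 64 gives -126, which clamps to 0:
 * no free space in the window.
 */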


/**
 * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
                          int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
         * and iwl_legacy_queue_dec_wrap are broken. */
        BUG_ON(!is_power_of_2(count));

        /* slots_num must be power-of-two size, otherwise
         * iwl_legacy_get_cmd_index is broken. */
        BUG_ON(!is_power_of_2(slots_num));

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}
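
/*
 * Why the power-of-two BUG_ONs matter (a sketch, assuming the usual
 * mask-based wrap helpers in iwl-helpers.h): with n_bd == 256, advancing
 * from index 255 wraps cleanly because (255 + 1) & (256 - 1) == 0.
 * With a non-power-of-two size such as 200, the same mask arithmetic
 * gives (199 + 1) & 199 == 192 instead of 0, silently corrupting the
 * ring.
 */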

/**
 * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
                              struct iwl_tx_queue *txq, u32 id)
{
        struct device *dev = &priv->pci_dev->dev;
        size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (id != priv->cmd_queue) {
                txq->txb = kzalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "kzalloc for auxiliary BD "
                                  "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
                                       GFP_KERNEL);
        if (!txq->tfds) {
                IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = id;

        return 0;

 error:
        kfree(txq->txb);
        txq->txb = NULL;

        return -ENOMEM;
}

/**
 * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id)
{
        int i, len;
        int ret;
        int actual_slots = slots_num;

        /*
         * Alloc buffer array for commands (Tx or other types of commands).
         * For the command queue (#4/#9), allocate command space + one big
         * command for scan, since the scan command is very large; the system
         * will not have two scans at the same time, so only one is needed.
         * For normal Tx queues (all other queues), no super-size command
         * space is needed.
         */
        if (txq_id == priv->cmd_queue)
                actual_slots++;

        txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
                            GFP_KERNEL);
        txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
                           GFP_KERNEL);

        if (!txq->meta || !txq->cmd)
                goto out_free_arrays;

        len = sizeof(struct iwl_device_cmd);
        for (i = 0; i < actual_slots; i++) {
                /* only happens for cmd queue */
                if (i == slots_num)
                        len = IWL_MAX_CMD_SIZE;

                txq->cmd[i] = kmalloc(len, GFP_KERNEL);
                if (!txq->cmd[i])
                        goto err;
        }

        /* Alloc driver data array and TFD circular buffer */
        ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
        if (ret)
                goto err;

        txq->need_update = 0;

        /*
         * For the default queues 0-3, set up the swq_id
         * already -- all others need to get one later
         * (if they need one at all).
         */
        if (txq_id < 4)
                iwl_legacy_set_swq_id(txq, txq_id, txq_id);

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        iwl_legacy_queue_init(priv, &txq->q,
                                TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue */
        priv->cfg->ops->lib->txq_init(priv, txq);

        return 0;
err:
        for (i = 0; i < actual_slots; i++)
                kfree(txq->cmd[i]);
out_free_arrays:
        kfree(txq->meta);
        kfree(txq->cmd);

        return -ENOMEM;
}
EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
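
/*
 * Usage sketch (illustrative; the real queue bring-up lives in the
 * per-device txq context code): a data queue passes TFD_TX_CMD_SLOTS,
 * while the command queue passes TFD_CMD_SLOTS and its own id so the
 * extra huge-command slot gets allocated.
 *
 *        slots = (txq_id == priv->cmd_queue) ?
 *                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 *        ret = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
 *                                       slots, txq_id);
 *        if (ret)
 *                IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
 */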

void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                        int slots_num, u32 txq_id)
{
        int actual_slots = slots_num;

        if (txq_id == priv->cmd_queue)
                actual_slots++;

        memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);

        txq->need_update = 0;

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        iwl_legacy_queue_init(priv, &txq->q,
                                TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue */
        priv->cfg->ops->lib->txq_init(priv, txq);
}
EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
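
/*
 * Reset vs. init (illustrative): across a firmware restart the driver can
 * keep the DMA allocations made by iwl_legacy_tx_queue_init() and merely
 * rewind each queue, e.g.:
 *
 *        iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
 *                                  slots, txq_id);
 */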

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to the device's private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed.  On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        dma_addr_t phys_addr;
        unsigned long flags;
        int len;
        u32 idx;
        u16 fix_size;

        cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
        fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

        /* If any of the command structures ends up being larger than
         * TFD_MAX_PAYLOAD_SIZE while it is sent as a 'small' command, then
         * we will need to increase the size of the TFD entries.
         * Also check that the command buffer does not exceed the size of
         * struct iwl_device_cmd and IWL_MAX_CMD_SIZE. */
        BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
               !(cmd->flags & CMD_SIZE_HUGE));
        BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

        if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
                IWL_WARN(priv, "Not sending command - %s KILL\n",
                         iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
                return -EIO;
        }

        if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                IWL_ERR(priv, "No space in command queue\n");
                IWL_ERR(priv, "Restarting adapter due to queue full\n");
                queue_work(priv->workqueue, &priv->restart);
                return -ENOSPC;
        }

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        /* If this is a huge cmd, mark the huge flag also on the meta.flags
         * of the _original_ cmd. This is used for DMA mapping clean up.
         */
        if (cmd->flags & CMD_SIZE_HUGE) {
                idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
                txq->meta[idx].flags = CMD_SIZE_HUGE;
        }

        idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
        out_cmd = txq->cmd[idx];
        out_meta = &txq->meta[idx];

        memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to 0 */
        out_meta->flags = cmd->flags;
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;
        if (cmd->flags & CMD_ASYNC)
                out_meta->callback = cmd->callback;

        out_cmd->hdr.cmd = cmd->id;
        memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

        /* At this point, the out_cmd now has all of the incoming cmd
         * information */

        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
                        INDEX_TO_SEQ(q->write_ptr));
        if (cmd->flags & CMD_SIZE_HUGE)
                out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
        len = sizeof(struct iwl_device_cmd);
        if (idx == TFD_CMD_SLOTS)
                len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
        switch (out_cmd->hdr.cmd) {
        case REPLY_TX_LINK_QUALITY_CMD:
        case SENSITIVITY_CMD:
                IWL_DEBUG_HC_DUMP(priv,
                                "Sending command %s (#%x), seq: 0x%04X, "
                                "%d bytes at %d[%d]:%d\n",
                                iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
                                out_cmd->hdr.cmd,
                                le16_to_cpu(out_cmd->hdr.sequence), fix_size,
                                q->write_ptr, idx, priv->cmd_queue);
                break;
        default:
                IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
                                "%d bytes at %d[%d]:%d\n",
                                iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
                                out_cmd->hdr.cmd,
                                le16_to_cpu(out_cmd->hdr.sequence), fix_size,
                                q->write_ptr, idx, priv->cmd_queue);
        }
#endif
        txq->need_update = 1;

        if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
                /* Set up entry in queue's byte count circular buffer */
                priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

        phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
                                   fix_size, PCI_DMA_BIDIRECTIONAL);
        dma_unmap_addr_set(out_meta, mapping, phys_addr);
        dma_unmap_len_set(out_meta, len, fix_size);

        trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
                                                fix_size, cmd->flags);

        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                   phys_addr, fix_size, 1,
                                                   U32_PAD(cmd->len));

        /* Increment and update queue's write index */
        q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_legacy_txq_update_write_ptr(priv, txq);

        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
        return idx;
}
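
/*
 * Usage sketch (illustrative; drivers normally reach this through the
 * iwl_legacy_send_cmd*() wrappers rather than calling it directly, and
 * the command id and payload here are placeholders):
 *
 *        __le32 stats_flags = cpu_to_le32(0);
 *        struct iwl_host_cmd cmd = {
 *                .id = REPLY_STATISTICS_CMD,
 *                .len = sizeof(stats_flags),
 *                .data = &stats_flags,
 *        };
 *        int idx = iwl_legacy_enqueue_hcmd(priv, &cmd);
 *        if (idx < 0)
 *                IWL_ERR(priv, "enqueue failed: %d\n", idx);
 */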

/**
 * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When the FW advances the 'R' index, all entries between the old and new
 * 'R' index need to be reclaimed.  As a result, some free space forms.  If
 * there is enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
                                   int idx, int cmd_idx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        int nfreed = 0;

        if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
                          "is out of range [0-%d] %d %d.\n", txq_id,
                          idx, q->n_bd, q->write_ptr, q->read_ptr);
                return;
        }

        for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (nfreed++ > 0) {
                        IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
                                        q->write_ptr, q->read_ptr);
                        queue_work(priv->workqueue, &priv->restart);
                }

        }
}

/**
 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void
iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
        int cmd_index;
        bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];

        /* If a Tx command is being handled and it isn't in the actual
         * command queue, then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != priv->cmd_queue,
                 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
                  txq_id, priv->cmd_queue, sequence,
                  priv->txq[priv->cmd_queue].q.read_ptr,
                  priv->txq[priv->cmd_queue].q.write_ptr)) {
                iwl_print_hex_error(priv, pkt, 32);
                return;
        }

        /* If this is a huge cmd, clear the huge flag on the meta.flags
         * of the _original_ cmd, so that iwl_legacy_cmd_queue_free won't unmap
         * the DMA buffer for the scan (huge) command.
         */
        if (huge) {
                cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
                txq->meta[cmd_index].flags = 0;
        }
        cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];

        pci_unmap_single(priv->pci_dev,
                         dma_unmap_addr(meta, mapping),
                         dma_unmap_len(meta, len),
                         PCI_DMA_BIDIRECTIONAL);

        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
                meta->source->reply_page = (unsigned long)rxb_addr(rxb);
                rxb->page = NULL;
        } else if (meta->callback)
                meta->callback(priv, cmd, pkt);

        iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

        if (!(meta->flags & CMD_ASYNC)) {
                clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
                IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
                               iwl_legacy_get_cmd_string(cmd->hdr.cmd));
                wake_up_interruptible(&priv->wait_command_queue);
        }
        meta->flags = 0;
}
EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
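
/*
 * Callback sketch (illustrative; the handler name is hypothetical, but the
 * signature matches the meta->callback(priv, cmd, pkt) invocation above,
 * used for commands queued with CMD_ASYNC):
 *
 *        static void example_hcmd_callback(struct iwl_priv *priv,
 *                                          struct iwl_device_cmd *cmd,
 *                                          struct iwl_rx_packet *pkt)
 *        {
 *                IWL_DEBUG_INFO(priv, "cmd %#x completed\n", cmd->hdr.cmd);
 *        }
 */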