linux/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
   1/******************************************************************************
   2 *
   3 * This file is provided under a dual BSD/GPLv2 license.  When using or
   4 * redistributing this file, you may do so under either license.
   5 *
   6 * GPL LICENSE SUMMARY
   7 *
   8 * Copyright(c) 2017 Intel Deutschland GmbH
   9 * Copyright(c) 2018 - 2019 Intel Corporation
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of version 2 of the GNU General Public License as
  13 * published by the Free Software Foundation.
  14 *
  15 * This program is distributed in the hope that it will be useful, but
  16 * WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * BSD LICENSE
  21 *
  22 * Copyright(c) 2017 Intel Deutschland GmbH
  23 * Copyright(c) 2018 - 2019 Intel Corporation
  24 * All rights reserved.
  25 *
  26 * Redistribution and use in source and binary forms, with or without
  27 * modification, are permitted provided that the following conditions
  28 * are met:
  29 *
  30 *  * Redistributions of source code must retain the above copyright
  31 *    notice, this list of conditions and the following disclaimer.
  32 *  * Redistributions in binary form must reproduce the above copyright
  33 *    notice, this list of conditions and the following disclaimer in
  34 *    the documentation and/or other materials provided with the
  35 *    distribution.
  36 *  * Neither the name Intel Corporation nor the names of its
  37 *    contributors may be used to endorse or promote products derived
  38 *    from this software without specific prior written permission.
  39 *
  40 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  41 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  42 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  43 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  44 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  45 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  46 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  47 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  48 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  50 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  51 *
  52 *****************************************************************************/
  53#include <linux/pm_runtime.h>
  54#include <net/tso.h>
  55#include <linux/tcp.h>
  56
  57#include "iwl-debug.h"
  58#include "iwl-csr.h"
  59#include "iwl-io.h"
  60#include "internal.h"
  61#include "fw/api/tx.h"
  62
  63/*
  64 * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
  65 */
  66void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
  67{
  68        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  69        int txq_id;
  70
  71        /*
  72         * This function can be called before the op_mode disabled the
  73         * queues. This happens when we have an rfkill interrupt.
  74         * Since we stop Tx altogether - mark the queues as stopped.
  75         */
  76        memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
  77        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
  78
  79        /* Unmap DMA from host system and free skb's */
  80        for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
  81                if (!trans_pcie->txq[txq_id])
  82                        continue;
  83                iwl_pcie_gen2_txq_unmap(trans, txq_id);
  84        }
  85}
  86
  87/*
  88 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
  89 */
  90void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
  91                                   struct iwl_txq *txq, u16 byte_cnt,
  92                                   int num_tbs)
  93{
  94        struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
  95        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
  96        struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
  97        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
  98        u8 filled_tfd_size, num_fetch_chunks;
  99        u16 len = byte_cnt;
 100        __le16 bc_ent;
 101
 102        if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
 103                return;
 104
 105        filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
 106                                   num_tbs * sizeof(struct iwl_tfh_tb);
 107        /*
 108         * filled_tfd_size contains the number of filled bytes in the TFD.
 109         * Dividing it by 64 will give the number of chunks to fetch
 110         * to SRAM- 0 for one chunk, 1 for 2 and so on.
 111         * If, for example, TFD contains only 3 TBs then 32 bytes
 112         * of the TFD are used, and only one chunk of 64 bytes should
 113         * be fetched
 114         */
 115        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 116
 117        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
 118                /* Starting from 22560, the HW expects bytes */
 119                WARN_ON(trans_pcie->bc_table_dword);
 120                WARN_ON(len > 0x3FFF);
 121                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
 122                scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
 123        } else {
 124                /* Until 22560, the HW expects DW */
 125                WARN_ON(!trans_pcie->bc_table_dword);
 126                len = DIV_ROUND_UP(len, 4);
 127                WARN_ON(len > 0xFFF);
 128                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
 129                scd_bc_tbl->tfd_offset[idx] = bc_ent;
 130        }
 131}
 132
 133/*
 134 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 135 */
 136void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
 137                                  struct iwl_txq *txq)
 138{
 139        lockdep_assert_held(&txq->lock);
 140
 141        IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
 142
 143        /*
 144         * if not in power-save mode, uCode will never sleep when we're
 145         * trying to tx (during RFKILL, we're not trying to tx).
 146         */
 147        iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
 148}
 149
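    /* Return the number of TBs already filled in the TFD (low 5 bits of num_tbs) */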
 150static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
 151                                    struct iwl_tfh_tfd *tfd)
 152{
 153        return le16_to_cpu(tfd->num_tbs) & 0x1f;
 154}
 155
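    /*
     * Unmap all DMA mappings of a TFD except the first TB, which holds the
     * bidirectional DMA data and is never freed here, then reset the TFD's
     * TB count. meta->tbs marks which TBs were mapped as pages.
     */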
 156static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
 157                                    struct iwl_cmd_meta *meta,
 158                                    struct iwl_tfh_tfd *tfd)
 159{
 160        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 161        int i, num_tbs;
 162
 163        /* Sanity check on number of chunks */
 164        num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
 165
 166        if (num_tbs > trans_pcie->max_tbs) {
 167                IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
 168                return;
 169        }
 170
 171        /* first TB is never freed - it's the bidirectional DMA data */
 172        for (i = 1; i < num_tbs; i++) {
 173                if (meta->tbs & BIT(i))
 174                        dma_unmap_page(trans->dev,
 175                                       le64_to_cpu(tfd->tbs[i].addr),
 176                                       le16_to_cpu(tfd->tbs[i].tb_len),
 177                                       DMA_TO_DEVICE);
 178                else
 179                        dma_unmap_single(trans->dev,
 180                                         le64_to_cpu(tfd->tbs[i].addr),
 181                                         le16_to_cpu(tfd->tbs[i].tb_len),
 182                                         DMA_TO_DEVICE);
 183        }
 184
 185        tfd->num_tbs = 0;
 186}
 187
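    /* Unmap the TFD at the queue's read pointer and free its skb, if any */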
 188static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 189{
 190        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
 191         * idx is bounded by n_window
 192         */
 193        int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
 194
 195        lockdep_assert_held(&txq->lock);
 196
 197        iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
 198                                iwl_pcie_get_tfd(trans, txq, idx));
 199
 200        /* free SKB */
 201        if (txq->entries) {
 202                struct sk_buff *skb;
 203
 204                skb = txq->entries[idx].skb;
 205
 206                /* Can be called from irqs-disabled context
 207                 * If skb is not NULL, it means that the whole queue is being
 208                 * freed and that the queue is not empty - free the skb
 209                 */
 210                if (skb) {
 211                        iwl_op_mode_free_skb(trans->op_mode, skb);
 212                        txq->entries[idx].skb = NULL;
 213                }
 214        }
 215}
 216
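    /*
     * Add one TB (DMA address + length) to the TFD. Returns the index of the
     * new TB, or -EINVAL if the TFD already holds the maximum number of TBs.
     */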
 217static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
 218                                struct iwl_tfh_tfd *tfd, dma_addr_t addr,
 219                                u16 len)
 220{
 221        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 222        int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
 223        struct iwl_tfh_tb *tb;
 224
 225        if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
 226                return -EINVAL;
 227        tb = &tfd->tbs[idx];
 228
 229        /* Each TFD can point to a maximum of max_tbs Tx buffers */
 230        if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
 231                IWL_ERR(trans, "Error can not send more than %d chunks\n",
 232                        trans_pcie->max_tbs);
 233                return -EINVAL;
 234        }
 235
 236        put_unaligned_le64(addr, &tb->addr);
 237        tb->tb_len = cpu_to_le16(len);
 238
 239        tfd->num_tbs = cpu_to_le16(idx + 1);
 240
 241        return idx;
 242}
 243
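    /*
     * Build the A-MSDU subframes of a GSO skb: the per-subframe headers are
     * written into a header page and mapped as TBs, while the payload chunks
     * are mapped straight from the skb with the help of the TSO core.
     * Returns 0 on success or a negative errno (always -EINVAL without
     * CONFIG_INET).
     */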
 244static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 245                                     struct sk_buff *skb,
 246                                     struct iwl_tfh_tfd *tfd, int start_len,
 247                                     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
 248{
 249#ifdef CONFIG_INET
 250        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 251        struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
 252        struct ieee80211_hdr *hdr = (void *)skb->data;
 253        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
 254        unsigned int mss = skb_shinfo(skb)->gso_size;
 255        u16 length, iv_len, amsdu_pad;
 256        u8 *start_hdr;
 257        struct iwl_tso_hdr_page *hdr_page;
 258        struct page **page_ptr;
 259        struct tso_t tso;
 260
 261        /* if the packet is protected, then it must be CCMP or GCMP */
 262        iv_len = ieee80211_has_protected(hdr->frame_control) ?
 263                IEEE80211_CCMP_HDR_LEN : 0;
 264
 265        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
 266                             &dev_cmd->hdr, start_len, 0);
 267
 268        ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
 269        snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
 270        total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
 271        amsdu_pad = 0;
 272
 273        /* total amount of header we may need for this A-MSDU */
 274        hdr_room = DIV_ROUND_UP(total_len, mss) *
 275                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
 276
 277        /* Our device supports 9 segments at most, so it will fit in 1 page */
 278        hdr_page = get_page_hdr(trans, hdr_room);
 279        if (!hdr_page)
 280                return -ENOMEM;
 281
 282        get_page(hdr_page->page);
 283        start_hdr = hdr_page->pos;
 284        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
 285        *page_ptr = hdr_page->page;
 286        memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
 287        hdr_page->pos += iv_len;
 288
 289        /*
 290         * Pull the ieee80211 header + IV to be able to use TSO core,
 291         * we will restore it for the tx_status flow.
 292         */
 293        skb_pull(skb, hdr_len + iv_len);
 294
 295        /*
 296         * Remove the length of all the headers that we don't actually
 297         * have in the MPDU by themselves, but that we duplicate into
 298         * all the different MSDUs inside the A-MSDU.
 299         */
 300        le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
 301
 302        tso_start(skb, &tso);
 303
 304        while (total_len) {
 305                /* this is the data left for this subframe */
 306                unsigned int data_left = min_t(unsigned int, mss, total_len);
 307                struct sk_buff *csum_skb = NULL;
 308                unsigned int tb_len;
 309                dma_addr_t tb_phys;
 310                u8 *subf_hdrs_start = hdr_page->pos;
 311
 312                total_len -= data_left;
 313
 314                memset(hdr_page->pos, 0, amsdu_pad);
 315                hdr_page->pos += amsdu_pad;
 316                amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
 317                                  data_left)) & 0x3;
 318                ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
 319                hdr_page->pos += ETH_ALEN;
 320                ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
 321                hdr_page->pos += ETH_ALEN;
 322
 323                length = snap_ip_tcp_hdrlen + data_left;
 324                *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
 325                hdr_page->pos += sizeof(length);
 326
 327                /*
 328                 * This will copy the SNAP as well which will be considered
 329                 * as MAC header.
 330                 */
 331                tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
 332
 333                hdr_page->pos += snap_ip_tcp_hdrlen;
 334
 335                tb_len = hdr_page->pos - start_hdr;
 336                tb_phys = dma_map_single(trans->dev, start_hdr,
 337                                         tb_len, DMA_TO_DEVICE);
 338                if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
 339                        dev_kfree_skb(csum_skb);
 340                        goto out_err;
 341                }
 342                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
 343                trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
 344                /* add this subframe's headers' length to the tx_cmd */
 345                le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
 346
 347                /* prepare the start_hdr for the next subframe */
 348                start_hdr = hdr_page->pos;
 349
 350                /* put the payload */
 351                while (data_left) {
 352                        tb_len = min_t(unsigned int, tso.size, data_left);
 353                        tb_phys = dma_map_single(trans->dev, tso.data,
 354                                                 tb_len, DMA_TO_DEVICE);
 355                        if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
 356                                dev_kfree_skb(csum_skb);
 357                                goto out_err;
 358                        }
 359                        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
 360                        trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
 361                                                tb_len);
 362
 363                        data_left -= tb_len;
 364                        tso_build_data(skb, &tso, tb_len);
 365                }
 366        }
 367
 368        /* re-add the WiFi header and IV */
 369        skb_push(skb, hdr_len + iv_len);
 370
 371        return 0;
 372
 373out_err:
 374#endif
 375        return -EINVAL;
 376}
 377
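    /*
     * Build a TFD for a GSO A-MSDU frame: TB0 covers the start of the TX
     * command, TB1 the rest of the command plus the 802.11 header, and the
     * remaining TBs are filled in by iwl_pcie_gen2_build_amsdu().
     * Returns the TFD, or NULL on error.
     */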
 378static struct
 379iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
 380                                          struct iwl_txq *txq,
 381                                          struct iwl_device_cmd *dev_cmd,
 382                                          struct sk_buff *skb,
 383                                          struct iwl_cmd_meta *out_meta,
 384                                          int hdr_len,
 385                                          int tx_cmd_len)
 386{
 387        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 388        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 389        dma_addr_t tb_phys;
 390        int len;
 391        void *tb1_addr;
 392
 393        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 394
 395        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 396
 397        /*
 398         * The second TB (tb1) points to the remainder of the TX command
 399         * and the 802.11 header - dword aligned size
 400         * (This calculation modifies the TX command, so do it before the
 401         * setup of the first TB)
 402         */
 403        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
 404              IWL_FIRST_TB_SIZE;
 405
 406        /* do not align A-MSDU to dword as the subframe header aligns it */
 407
 408        /* map the data for TB1 */
 409        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
 410        tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
 411        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 412                goto out_err;
 413        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
 414
 415        if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
 416                                      len + IWL_FIRST_TB_SIZE,
 417                                      hdr_len, dev_cmd))
 418                goto out_err;
 419
 420        /* building the A-MSDU might have changed this data, memcpy it now */
 421        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
 422        return tfd;
 423
 424out_err:
 425        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
 426        return NULL;
 427}
 428
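    /*
     * Map each non-empty paged fragment of the skb as a TB and record it in
     * out_meta->tbs so it is later unmapped with dma_unmap_page().
     */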
 429static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
 430                                      struct sk_buff *skb,
 431                                      struct iwl_tfh_tfd *tfd,
 432                                      struct iwl_cmd_meta *out_meta)
 433{
 434        int i;
 435
 436        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 437                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 438                dma_addr_t tb_phys;
 439                int tb_idx;
 440
 441                if (!skb_frag_size(frag))
 442                        continue;
 443
 444                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
 445                                           skb_frag_size(frag), DMA_TO_DEVICE);
 446
 447                if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 448                        return -ENOMEM;
 449                tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
 450                                              skb_frag_size(frag));
 451                trace_iwlwifi_dev_tx_tb(trans->dev, skb,
 452                                        skb_frag_address(frag),
 453                                        skb_frag_size(frag));
 454                if (tb_idx < 0)
 455                        return tb_idx;
 456
 457                out_meta->tbs |= BIT(tb_idx);
 458        }
 459
 460        return 0;
 461}
 462
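    /*
     * Build a TFD for a regular (non-GSO) frame: TB0 covers the start of the
     * TX command, TB1 the rest of the command plus the 802.11 header (dword
     * aligned when @pad is set), TB2 the remainder of the skb head, and
     * further TBs the paged fragments. Returns the TFD, or NULL on error.
     */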
 463static struct
 464iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 465                                    struct iwl_txq *txq,
 466                                    struct iwl_device_cmd *dev_cmd,
 467                                    struct sk_buff *skb,
 468                                    struct iwl_cmd_meta *out_meta,
 469                                    int hdr_len,
 470                                    int tx_cmd_len,
 471                                    bool pad)
 472{
 473        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 474        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 475        dma_addr_t tb_phys;
 476        int len, tb1_len, tb2_len;
 477        void *tb1_addr;
 478
 479        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 480
 481        /* The first TB points to bi-directional DMA data */
 482        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
 483
 484        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 485
 486        /*
 487         * The second TB (tb1) points to the remainder of the TX command
 488         * and the 802.11 header - dword aligned size
 489         * (This calculation modifies the TX command, so do it before the
 490         * setup of the first TB)
 491         */
 492        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
 493              IWL_FIRST_TB_SIZE;
 494
 495        if (pad)
 496                tb1_len = ALIGN(len, 4);
 497        else
 498                tb1_len = len;
 499
 500        /* map the data for TB1 */
 501        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
 502        tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
 503        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 504                goto out_err;
 505        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
 506        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
 507                             IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
 508
 509        /* set up TFD's third entry to point to remainder of skb's head */
 510        tb2_len = skb_headlen(skb) - hdr_len;
 511
 512        if (tb2_len > 0) {
 513                tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
 514                                         tb2_len, DMA_TO_DEVICE);
 515                if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 516                        goto out_err;
 517                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
 518                trace_iwlwifi_dev_tx_tb(trans->dev, skb,
 519                                        skb->data + hdr_len,
 520                                        tb2_len);
 521        }
 522
 523        if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
 524                goto out_err;
 525
 526        return tfd;
 527
 528out_err:
 529        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
 530        return NULL;
 531}
 532
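    /*
     * Pick the TX command size for the device family and dispatch to the
     * A-MSDU or regular TFD build path; the A-MSDU path is only taken for
     * GSO frames.
     */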
 533static
 534struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 535                                            struct iwl_txq *txq,
 536                                            struct iwl_device_cmd *dev_cmd,
 537                                            struct sk_buff *skb,
 538                                            struct iwl_cmd_meta *out_meta)
 539{
 540        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 541        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 542        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 543        int len, hdr_len;
 544        bool amsdu;
 545
 546        /* There must be data left over for TB1 or this code must be changed */
 547        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
 548
 549        memset(tfd, 0, sizeof(*tfd));
 550
 551        if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
 552                len = sizeof(struct iwl_tx_cmd_gen2);
 553        else
 554                len = sizeof(struct iwl_tx_cmd_gen3);
 555
 556        amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
 557                        (*ieee80211_get_qos_ctl(hdr) &
 558                         IEEE80211_QOS_CTL_A_MSDU_PRESENT);
 559
 560        hdr_len = ieee80211_hdrlen(hdr->frame_control);
 561
 562        /*
 563         * Only build A-MSDUs here if doing so by GSO, otherwise it may be
 564         * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
 565         * built in the higher layers already.
 566         */
 567        if (amsdu && skb_shinfo(skb)->gso_size)
 568                return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
 569                                                    out_meta, hdr_len, len);
 570
 571        return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
 572                                      hdr_len, len, !amsdu);
 573}
 574
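    /*
     * iwl_trans_pcie_gen2_tx - transmit one frame on a gen2 TX queue
     *
     * Builds the TFD, updates the byte-count table and pushes the new write
     * pointer to the device. If the queue is getting full it is stopped, and
     * if there is no room left the frame is parked on the overflow queue.
     */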
 575int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 576                           struct iwl_device_cmd *dev_cmd, int txq_id)
 577{
 578        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 579        struct iwl_cmd_meta *out_meta;
 580        struct iwl_txq *txq = trans_pcie->txq[txq_id];
 581        u16 cmd_len;
 582        int idx;
 583        void *tfd;
 584
 585        if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
 586                      "TX on unused queue %d\n", txq_id))
 587                return -EINVAL;
 588
 589        if (skb_is_nonlinear(skb) &&
 590            skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
 591            __skb_linearize(skb))
 592                return -ENOMEM;
 593
 594        spin_lock(&txq->lock);
 595
 596        if (iwl_queue_space(trans, txq) < txq->high_mark) {
 597                iwl_stop_queue(trans, txq);
 598
 599                /* don't put the packet on the ring if there is no room */
 600                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
 601                        struct iwl_device_cmd **dev_cmd_ptr;
 602
 603                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
 604                                               trans_pcie->dev_cmd_offs);
 605
 606                        *dev_cmd_ptr = dev_cmd;
 607                        __skb_queue_tail(&txq->overflow_q, skb);
 608                        spin_unlock(&txq->lock);
 609                        return 0;
 610                }
 611        }
 612
 613        idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 614
 615        /* Set up driver data for this TFD */
 616        txq->entries[idx].skb = skb;
 617        txq->entries[idx].cmd = dev_cmd;
 618
 619        dev_cmd->hdr.sequence =
 620                cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
 621                            INDEX_TO_SEQ(idx)));
 622
 623        /* Set up first empty entry in queue's array of Tx/cmd buffers */
 624        out_meta = &txq->entries[idx].meta;
 625        out_meta->flags = 0;
 626
 627        tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
 628        if (!tfd) {
 629                spin_unlock(&txq->lock);
 630                return -1;
 631        }
 632
 633        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
 634                struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
 635                        (void *)dev_cmd->payload;
 636
 637                cmd_len = le16_to_cpu(tx_cmd_gen3->len);
 638        } else {
 639                struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
 640                        (void *)dev_cmd->payload;
 641
 642                cmd_len = le16_to_cpu(tx_cmd_gen2->len);
 643        }
 644
 645        /* Set up entry for this TFD in Tx byte-count array */
 646        iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
 647                                      iwl_pcie_gen2_get_num_tbs(trans, tfd));
 648
 649        /* start timer if queue currently empty */
 650        if (txq->read_ptr == txq->write_ptr) {
 651                if (txq->wd_timeout)
 652                        mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 653                IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
 654                iwl_trans_ref(trans);
 655        }
 656
 657        /* Tell device the write index *just past* this latest filled TFD */
 658        txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 659        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
 660        /*
 661         * At this point the frame is "transmitted" successfully
 662         * and we will get a TX status notification eventually.
 663         */
 664        spin_unlock(&txq->lock);
 665        return 0;
 666}
 667
 668/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
 669
 670/*
 671 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 672 * @trans: the transport (device private data)
 673 * @cmd: a pointer to the ucode command structure
 674 *
 675 * The function returns < 0 values to indicate the operation
 676 * failed. On success, it returns the index (>= 0) of the command in
 677 * the command queue.
 678 */
 679static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 680                                      struct iwl_host_cmd *cmd)
 681{
 682        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 683        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
 684        struct iwl_device_cmd *out_cmd;
 685        struct iwl_cmd_meta *out_meta;
 686        unsigned long flags;
 687        void *dup_buf = NULL;
 688        dma_addr_t phys_addr;
 689        int i, cmd_pos, idx;
 690        u16 copy_size, cmd_size, tb0_size;
 691        bool had_nocopy = false;
 692        u8 group_id = iwl_cmd_groupid(cmd->id);
 693        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
 694        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 695        struct iwl_tfh_tfd *tfd;
 696
 697        copy_size = sizeof(struct iwl_cmd_header_wide);
 698        cmd_size = sizeof(struct iwl_cmd_header_wide);
 699
 700        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 701                cmddata[i] = cmd->data[i];
 702                cmdlen[i] = cmd->len[i];
 703
 704                if (!cmd->len[i])
 705                        continue;
 706
 707                /* need at least IWL_FIRST_TB_SIZE copied */
 708                if (copy_size < IWL_FIRST_TB_SIZE) {
 709                        int copy = IWL_FIRST_TB_SIZE - copy_size;
 710
 711                        if (copy > cmdlen[i])
 712                                copy = cmdlen[i];
 713                        cmdlen[i] -= copy;
 714                        cmddata[i] += copy;
 715                        copy_size += copy;
 716                }
 717
 718                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
 719                        had_nocopy = true;
 720                        if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
 721                                idx = -EINVAL;
 722                                goto free_dup_buf;
 723                        }
 724                } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
 725                        /*
 726                         * This is also a chunk that isn't copied
 727                         * to the static buffer so set had_nocopy.
 728                         */
 729                        had_nocopy = true;
 730
 731                        /* only allowed once */
 732                        if (WARN_ON(dup_buf)) {
 733                                idx = -EINVAL;
 734                                goto free_dup_buf;
 735                        }
 736
 737                        dup_buf = kmemdup(cmddata[i], cmdlen[i],
 738                                          GFP_ATOMIC);
 739                        if (!dup_buf)
 740                                return -ENOMEM;
 741                } else {
 742                        /* NOCOPY must not be followed by normal! */
 743                        if (WARN_ON(had_nocopy)) {
 744                                idx = -EINVAL;
 745                                goto free_dup_buf;
 746                        }
 747                        copy_size += cmdlen[i];
 748                }
 749                cmd_size += cmd->len[i];
 750        }
 751
 752        /*
 753         * If any of the command structures end up being larger than the
 754         * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
 755         * separate TFDs, then we will need to increase the size of the buffers
 756         */
 757        if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
 758                 "Command %s (%#x) is too large (%d bytes)\n",
 759                 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
 760                idx = -EINVAL;
 761                goto free_dup_buf;
 762        }
 763
 764        spin_lock_bh(&txq->lock);
 765
 766        idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 767        tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
 768        memset(tfd, 0, sizeof(*tfd));
 769
 770        if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 771                spin_unlock_bh(&txq->lock);
 772
 773                IWL_ERR(trans, "No space in command queue\n");
 774                iwl_op_mode_cmd_queue_full(trans->op_mode);
 775                idx = -ENOSPC;
 776                goto free_dup_buf;
 777        }
 778
 779        out_cmd = txq->entries[idx].cmd;
 780        out_meta = &txq->entries[idx].meta;
 781
 782        /* re-initialize to NULL */
 783        memset(out_meta, 0, sizeof(*out_meta));
 784        if (cmd->flags & CMD_WANT_SKB)
 785                out_meta->source = cmd;
 786
 787        /* set up the header */
 788        out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
 789        out_cmd->hdr_wide.group_id = group_id;
 790        out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
 791        out_cmd->hdr_wide.length =
 792                cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
 793        out_cmd->hdr_wide.reserved = 0;
 794        out_cmd->hdr_wide.sequence =
 795                cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
 796                                         INDEX_TO_SEQ(txq->write_ptr));
 797
 798        cmd_pos = sizeof(struct iwl_cmd_header_wide);
 799        copy_size = sizeof(struct iwl_cmd_header_wide);
 800
 801        /* and copy the data that needs to be copied */
 802        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 803                int copy;
 804
 805                if (!cmd->len[i])
 806                        continue;
 807
 808                /* copy everything if not nocopy/dup */
 809                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
 810                                           IWL_HCMD_DFL_DUP))) {
 811                        copy = cmd->len[i];
 812
 813                        memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
 814                        cmd_pos += copy;
 815                        copy_size += copy;
 816                        continue;
 817                }
 818
 819                /*
 820                 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
 821                 * in total (for bi-directional DMA), but copy up to what
 822                 * we can fit into the payload for debug dump purposes.
 823                 */
 824                copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
 825
 826                memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
 827                cmd_pos += copy;
 828
 829                /* However, treat copy_size the proper way, we need it below */
 830                if (copy_size < IWL_FIRST_TB_SIZE) {
 831                        copy = IWL_FIRST_TB_SIZE - copy_size;
 832
 833                        if (copy > cmd->len[i])
 834                                copy = cmd->len[i];
 835                        copy_size += copy;
 836                }
 837        }
 838
 839        IWL_DEBUG_HC(trans,
 840                     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
 841                     iwl_get_cmd_string(trans, cmd->id), group_id,
 842                     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
 843                     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
 844
 845        /* start the TFD with the minimum copy bytes */
 846        tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
 847        memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
 848        iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
 849                             tb0_size);
 850
 851        /* map first command fragment, if any remains */
 852        if (copy_size > tb0_size) {
 853                phys_addr = dma_map_single(trans->dev,
 854                                           (u8 *)out_cmd + tb0_size,
 855                                           copy_size - tb0_size,
 856                                           DMA_TO_DEVICE);
 857                if (dma_mapping_error(trans->dev, phys_addr)) {
 858                        idx = -ENOMEM;
 859                        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
 860                        goto out;
 861                }
 862                iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
 863                                     copy_size - tb0_size);
 864        }
 865
 866        /* map the remaining (adjusted) nocopy/dup fragments */
 867        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 868                const void *data = cmddata[i];
 869
 870                if (!cmdlen[i])
 871                        continue;
 872                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
 873                                           IWL_HCMD_DFL_DUP)))
 874                        continue;
 875                if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
 876                        data = dup_buf;
 877                phys_addr = dma_map_single(trans->dev, (void *)data,
 878                                           cmdlen[i], DMA_TO_DEVICE);
 879                if (dma_mapping_error(trans->dev, phys_addr)) {
 880                        idx = -ENOMEM;
 881                        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
 882                        goto out;
 883                }
 884                iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
 885        }
 886
 887        BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
 888        out_meta->flags = cmd->flags;
 889        if (WARN_ON_ONCE(txq->entries[idx].free_buf))
 890                kzfree(txq->entries[idx].free_buf);
 891        txq->entries[idx].free_buf = dup_buf;
 892
 893        trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 894
 895        /* start timer if queue currently empty */
 896        if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
 897                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 898
 899        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
 900        if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
 901            !trans_pcie->ref_cmd_in_flight) {
 902                trans_pcie->ref_cmd_in_flight = true;
 903                IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
 904                iwl_trans_ref(trans);
 905        }
 906        /* Increment and update queue's write index */
 907        txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 908        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
 909        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
 910
 911out:
 912        spin_unlock_bh(&txq->lock);
 913free_dup_buf:
 914        if (idx < 0)
 915                kfree(dup_buf);
 916        return idx;
 917}
 918
 919#define HOST_COMPLETE_TIMEOUT   (2 * HZ)
 920
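    /*
     * Send a host command synchronously: make sure the device is out of
     * D0i3, enqueue the command and wait up to HOST_COMPLETE_TIMEOUT for its
     * completion, handling firmware errors and RFKILL along the way.
     */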
 921static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
 922                                        struct iwl_host_cmd *cmd)
 923{
 924        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 925        const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
 926        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
 927        int cmd_idx;
 928        int ret;
 929
 930        IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
 931
 932        if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
 933                                  &trans->status),
 934                 "Command %s: a command is already active!\n", cmd_str))
 935                return -EIO;
 936
 937        IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
 938
 939        if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
 940                ret = wait_event_timeout(trans_pcie->d0i3_waitq,
 941                                 pm_runtime_active(&trans_pcie->pci_dev->dev),
 942                                 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
 943                if (!ret) {
 944                        IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
 945                        return -ETIMEDOUT;
 946                }
 947        }
 948
 949        cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
 950        if (cmd_idx < 0) {
 951                ret = cmd_idx;
 952                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 953                IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
 954                        cmd_str, ret);
 955                return ret;
 956        }
 957
 958        ret = wait_event_timeout(trans_pcie->wait_command_queue,
 959                                 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
 960                                           &trans->status),
 961                                 HOST_COMPLETE_TIMEOUT);
 962        if (!ret) {
 963                IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
 964                        cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 965
 966                IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
 967                        txq->read_ptr, txq->write_ptr);
 968
 969                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 970                IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
 971                               cmd_str);
 972                ret = -ETIMEDOUT;
 973
 974                iwl_trans_pcie_sync_nmi(trans);
 975                goto cancel;
 976        }
 977
 978        if (test_bit(STATUS_FW_ERROR, &trans->status)) {
 979                IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
 980                dump_stack();
 981                ret = -EIO;
 982                goto cancel;
 983        }
 984
 985        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
 986            test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
 987                IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
 988                ret = -ERFKILL;
 989                goto cancel;
 990        }
 991
 992        if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
 993                IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
 994                ret = -EIO;
 995                goto cancel;
 996        }
 997
 998        return 0;
 999
1000cancel:
1001        if (cmd->flags & CMD_WANT_SKB) {
1002                /*
1003                 * Cancel the CMD_WANT_SKB flag for the cmd in the
1004                 * TX cmd queue. Otherwise in case the cmd comes
1005                 * in later, it will possibly set an invalid
1006                 * address (cmd->meta.source).
1007                 */
1008                txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1009        }
1010
1011        if (cmd->resp_pkt) {
1012                iwl_free_resp(cmd);
1013                cmd->resp_pkt = NULL;
1014        }
1015
1016        return ret;
1017}
1018
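    /*
     * Entry point for host commands: drop the command if RFKILL is asserted
     * (unless CMD_SEND_IN_RFKILL), enqueue CMD_ASYNC commands directly and
     * use the synchronous path for everything else.
     */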
1019int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
1020                                  struct iwl_host_cmd *cmd)
1021{
1022        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1023            test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1024                IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1025                                  cmd->id);
1026                return -ERFKILL;
1027        }
1028
1029        if (cmd->flags & CMD_ASYNC) {
1030                int ret;
1031
1032                /* An asynchronous command can not expect an SKB to be set. */
1033                if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1034                        return -EINVAL;
1035
1036                ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
1037                if (ret < 0) {
1038                        IWL_ERR(trans,
1039                                "Error sending %s: enqueue_hcmd failed: %d\n",
1040                                iwl_get_cmd_string(trans, cmd->id), ret);
1041                        return ret;
1042                }
1043                return 0;
1044        }
1045
1046        return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
1047}
1048
1049/*
1050 * iwl_pcie_gen2_txq_unmap -  Unmap any remaining DMA mappings and free skb's
1051 */
1052void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
1053{
1054        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1055        struct iwl_txq *txq = trans_pcie->txq[txq_id];
1056
1057        spin_lock_bh(&txq->lock);
1058        while (txq->write_ptr != txq->read_ptr) {
1059                IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
1060                                   txq_id, txq->read_ptr);
1061
1062                if (txq_id != trans_pcie->cmd_queue) {
1063                        int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
1064                        struct sk_buff *skb = txq->entries[idx].skb;
1065
1066                        if (WARN_ON_ONCE(!skb))
1067                                continue;
1068
1069                        iwl_pcie_free_tso_page(trans_pcie, skb);
1070                }
1071                iwl_pcie_gen2_free_tfd(trans, txq);
1072                txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
1073
1074                if (txq->read_ptr == txq->write_ptr) {
1075                        unsigned long flags;
1076
1077                        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1078                        if (txq_id != trans_pcie->cmd_queue) {
1079                                IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
1080                                              txq->id);
1081                                iwl_trans_unref(trans);
1082                        } else if (trans_pcie->ref_cmd_in_flight) {
1083                                trans_pcie->ref_cmd_in_flight = false;
1084                                IWL_DEBUG_RPM(trans,
1085                                              "clear ref_cmd_in_flight\n");
1086                                iwl_trans_unref(trans);
1087                        }
1088                        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1089                }
1090        }
1091
1092        while (!skb_queue_empty(&txq->overflow_q)) {
1093                struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
1094
1095                iwl_op_mode_free_skb(trans->op_mode, skb);
1096        }
1097
1098        spin_unlock_bh(&txq->lock);
1099
1100        /* just in case - this queue may have been stopped */
1101        iwl_wake_queue(trans, txq);
1102}
1103
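    /*
     * Free a TX queue's DMA rings (TFDs and first-TB buffers), its byte-count
     * table, its entries array and the queue structure itself.
     */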
1104void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
1105                                   struct iwl_txq *txq)
1106{
1107        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1108        struct device *dev = trans->dev;
1109
1110        /* De-alloc circular buffer of TFDs */
1111        if (txq->tfds) {
1112                dma_free_coherent(dev,
1113                                  trans_pcie->tfd_size * txq->n_window,
1114                                  txq->tfds, txq->dma_addr);
1115                dma_free_coherent(dev,
1116                                  sizeof(*txq->first_tb_bufs) * txq->n_window,
1117                                  txq->first_tb_bufs, txq->first_tb_dma);
1118        }
1119
1120        kfree(txq->entries);
1121        iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
1122        kfree(txq);
1123}
1124
1125/*
1126 * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
1127 * @txq_id: index of the transmit queue to deallocate.
1128 *
1129 * Empty queue by removing and destroying all BD's.
1130 * Free all buffers.
1131 * 0-fill, but do not free "txq" descriptor structure.
1132 */
1133static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
1134{
1135        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1136        struct iwl_txq *txq = trans_pcie->txq[txq_id];
1137        int i;
1138
1139        if (WARN_ON(!txq))
1140                return;
1141
1142        iwl_pcie_gen2_txq_unmap(trans, txq_id);
1143
1144        /* De-alloc array of command/tx buffers */
1145        if (txq_id == trans_pcie->cmd_queue)
1146                for (i = 0; i < txq->n_window; i++) {
1147                        kzfree(txq->entries[i].cmd);
1148                        kzfree(txq->entries[i].free_buf);
1149                }
1150        del_timer_sync(&txq->stuck_timer);
1151
1152        iwl_pcie_gen2_txq_free_memory(trans, txq);
1153
1154        trans_pcie->txq[txq_id] = NULL;
1155
1156        clear_bit(txq_id, trans_pcie->queue_used);
1157}
1158
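    /*
     * Allocate a TX queue structure, its byte-count table (gen2 or gen3
     * sized, depending on the device family) and its DMA rings, then
     * initialize it for the dynamic queue allocation flow.
     */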
1159int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
1160                                     struct iwl_txq **intxq, int size,
1161                                     unsigned int timeout)
1162{
1163        struct iwl_txq *txq;
1164        int ret;
1165
1166        txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1167        if (!txq)
1168                return -ENOMEM;
1169        ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
1170                                     (trans->cfg->device_family >=
1171                                      IWL_DEVICE_FAMILY_22560) ?
1172                                     sizeof(struct iwl_gen3_bc_tbl) :
1173                                     sizeof(struct iwlagn_scd_bc_tbl));
1174        if (ret) {
1175                IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1176                kfree(txq);
1177                return -ENOMEM;
1178        }
1179
1180        ret = iwl_pcie_txq_alloc(trans, txq, size, false);
1181        if (ret) {
1182                IWL_ERR(trans, "Tx queue alloc failed\n");
1183                goto error;
1184        }
1185        ret = iwl_pcie_txq_init(trans, txq, size, false);
1186        if (ret) {
1187                IWL_ERR(trans, "Tx queue init failed\n");
1188                goto error;
1189        }
1190
1191        txq->wd_timeout = msecs_to_jiffies(timeout);
1192
1193        *intxq = txq;
1194        return 0;
1195
1196error:
1197        iwl_pcie_gen2_txq_free_memory(trans, txq);
1198        return ret;
1199}
1200
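    /*
     * Handle the firmware's TX queue config response: validate the queue id,
     * mark it used, store the queue and set its read/write pointers from the
     * firmware's write pointer. Returns the queue id or a negative errno;
     * the response packet is freed in all cases.
     */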
1201int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
1202                                      struct iwl_txq *txq,
1203                                      struct iwl_host_cmd *hcmd)
1204{
1205        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1206        struct iwl_tx_queue_cfg_rsp *rsp;
1207        int ret, qid;
1208        u32 wr_ptr;
1209
1210        if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1211                    sizeof(*rsp))) {
1212                ret = -EINVAL;
1213                goto error_free_resp;
1214        }
1215
1216        rsp = (void *)hcmd->resp_pkt->data;
1217        qid = le16_to_cpu(rsp->queue_number);
1218        wr_ptr = le16_to_cpu(rsp->write_pointer);
1219
1220        if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
1221                WARN_ONCE(1, "queue index %d unsupported", qid);
1222                ret = -EIO;
1223                goto error_free_resp;
1224        }
1225
1226        if (test_and_set_bit(qid, trans_pcie->queue_used)) {
1227                WARN_ONCE(1, "queue %d already used", qid);
1228                ret = -EIO;
1229                goto error_free_resp;
1230        }
1231
1232        txq->id = qid;
1233        trans_pcie->txq[qid] = txq;
1234        wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
1235
1236        /* Place first TFD at index corresponding to start sequence number */
1237        txq->read_ptr = wr_ptr;
1238        txq->write_ptr = wr_ptr;
1239
1240        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1241
1242        iwl_free_resp(hcmd);
1243        return qid;
1244
1245error_free_resp:
1246        iwl_free_resp(hcmd);
1247        iwl_pcie_gen2_txq_free_memory(trans, txq);
1248        return ret;
1249}
1250
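    /*
     * Allocate a TX queue and ask the firmware to enable it with the given
     * queue config command. Returns the queue id chosen by the firmware, or
     * a negative errno.
     */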
1251int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
1252                                 __le16 flags, u8 sta_id, u8 tid,
1253                                 int cmd_id, int size,
1254                                 unsigned int timeout)
1255{
1256        struct iwl_txq *txq = NULL;
1257        struct iwl_tx_queue_cfg_cmd cmd = {
1258                .flags = flags,
1259                .sta_id = sta_id,
1260                .tid = tid,
1261        };
1262        struct iwl_host_cmd hcmd = {
1263                .id = cmd_id,
1264                .len = { sizeof(cmd) },
1265                .data = { &cmd, },
1266                .flags = CMD_WANT_SKB,
1267        };
1268        int ret;
1269
1270        ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
1271        if (ret)
1272                return ret;
1273
1274        cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
1275        cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1276        cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1277
1278        ret = iwl_trans_send_cmd(trans, &hcmd);
1279        if (ret)
1280                goto error;
1281
1282        return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);
1283
1284error:
1285        iwl_pcie_gen2_txq_free_memory(trans, txq);
1286        return ret;
1287}
1288
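    /*
     * Deactivate a dynamically allocated TX queue: mark it unused and unmap
     * any frames still on it. The queue memory itself is only released when
     * all TX queues are freed.
     */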
1289void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
1290{
1291        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1292
1293        /*
1294         * Upon HW Rfkill - we stop the device, and then stop the queues
1295         * in the op_mode. Just for the sake of the simplicity of the op_mode,
1296         * allow the op_mode to call txq_disable after it already called
1297         * stop_device.
1298         */
1299        if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
1300                WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1301                          "queue %d not used", queue);
1302                return;
1303        }
1304
1305        iwl_pcie_gen2_txq_unmap(trans, queue);
1306
1307        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1308}
1309
1310void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
1311{
1312        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1313        int i;
1314
1315        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1316
1317        /* Free all TX queues */
1318        for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
1319                if (!trans_pcie->txq[i])
1320                        continue;
1321
1322                iwl_pcie_gen2_txq_free(trans, i);
1323        }
1324}
1325
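    /*
     * Allocate the TX queue with the given id if it does not exist yet, then
     * initialize it and mark it used; the queue is set up as a command queue
     * when txq_id matches the configured command queue.
     */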
1326int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
1327{
1328        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1329        struct iwl_txq *queue;
1330        int ret;
1331
1332        /* alloc and init the tx queue */
1333        if (!trans_pcie->txq[txq_id]) {
1334                queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1335                if (!queue) {
1336                        IWL_ERR(trans, "Not enough memory for tx queue\n");
1337                        return -ENOMEM;
1338                }
1339                trans_pcie->txq[txq_id] = queue;
1340                ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
1341                if (ret) {
1342                        IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1343                        goto error;
1344                }
1345        } else {
1346                queue = trans_pcie->txq[txq_id];
1347        }
1348
1349        ret = iwl_pcie_txq_init(trans, queue, queue_size,
1350                                (txq_id == trans_pcie->cmd_queue));
1351        if (ret) {
1352                IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1353                goto error;
1354        }
1355        trans_pcie->txq[txq_id]->id = txq_id;
1356        set_bit(txq_id, trans_pcie->queue_used);
1357
1358        return 0;
1359
1360error:
1361        iwl_pcie_gen2_tx_free(trans);
1362        return ret;
1363}
1364
1365