linux/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
   1/******************************************************************************
   2 *
   3 * This file is provided under a dual BSD/GPLv2 license.  When using or
   4 * redistributing this file, you may do so under either license.
   5 *
   6 * GPL LICENSE SUMMARY
   7 *
   8 * Copyright(c) 2017 Intel Deutschland GmbH
   9 * Copyright(c) 2018 - 2019 Intel Corporation
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of version 2 of the GNU General Public License as
  13 * published by the Free Software Foundation.
  14 *
  15 * This program is distributed in the hope that it will be useful, but
  16 * WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * BSD LICENSE
  21 *
  22 * Copyright(c) 2017 Intel Deutschland GmbH
  23 * Copyright(c) 2018 - 2019 Intel Corporation
  24 * All rights reserved.
  25 *
  26 * Redistribution and use in source and binary forms, with or without
  27 * modification, are permitted provided that the following conditions
  28 * are met:
  29 *
  30 *  * Redistributions of source code must retain the above copyright
  31 *    notice, this list of conditions and the following disclaimer.
  32 *  * Redistributions in binary form must reproduce the above copyright
  33 *    notice, this list of conditions and the following disclaimer in
  34 *    the documentation and/or other materials provided with the
  35 *    distribution.
  36 *  * Neither the name Intel Corporation nor the names of its
  37 *    contributors may be used to endorse or promote products derived
  38 *    from this software without specific prior written permission.
  39 *
  40 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  41 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  42 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  43 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  44 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  45 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  46 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  47 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  48 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  50 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  51 *
  52 *****************************************************************************/
  53#include <net/tso.h>
  54#include <linux/tcp.h>
  55
  56#include "iwl-debug.h"
  57#include "iwl-csr.h"
  58#include "iwl-io.h"
  59#include "internal.h"
  60#include "fw/api/tx.h"
  61
   62/*
  63 * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
  64 */
  65void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
  66{
  67        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  68        int txq_id;
  69
  70        /*
  71         * This function can be called before the op_mode disabled the
  72         * queues. This happens when we have an rfkill interrupt.
  73         * Since we stop Tx altogether - mark the queues as stopped.
  74         */
  75        memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
  76        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
  77
  78        /* Unmap DMA from host system and free skb's */
  79        for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
  80                if (!trans_pcie->txq[txq_id])
  81                        continue;
  82                iwl_pcie_gen2_txq_unmap(trans, txq_id);
  83        }
  84}
  85
  86/*
   87 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count table
  88 */
  89void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
  90                                   struct iwl_txq *txq, u16 byte_cnt,
  91                                   int num_tbs)
  92{
  93        struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
  94        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
  95        struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
  96        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
  97        u8 filled_tfd_size, num_fetch_chunks;
  98        u16 len = byte_cnt;
  99        __le16 bc_ent;
 100
 101        if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
 102                return;
 103
 104        filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
 105                                   num_tbs * sizeof(struct iwl_tfh_tb);
 106        /*
 107         * filled_tfd_size contains the number of filled bytes in the TFD.
 108         * Dividing it by 64 will give the number of chunks to fetch
  109         * to SRAM - 0 for one chunk, 1 for two chunks and so on.
  110         * If, for example, the TFD contains only 3 TBs, then 32 bytes
  111         * of the TFD are used, and only one chunk of 64 bytes should
  112         * be fetched.
 113         */
 114        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 115
 116        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
 117                /* Starting from 22560, the HW expects bytes */
 118                WARN_ON(trans_pcie->bc_table_dword);
 119                WARN_ON(len > 0x3FFF);
 120                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
 121                scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
 122        } else {
 123                /* Until 22560, the HW expects DW */
 124                WARN_ON(!trans_pcie->bc_table_dword);
 125                len = DIV_ROUND_UP(len, 4);
 126                WARN_ON(len > 0xFFF);
 127                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
 128                scd_bc_tbl->tfd_offset[idx] = bc_ent;
 129        }
 130}
 131
 132/*
 133 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 134 */
 135void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
 136                                  struct iwl_txq *txq)
 137{
 138        lockdep_assert_held(&txq->lock);
 139
 140        IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
 141
 142        /*
 143         * if not in power-save mode, uCode will never sleep when we're
 144         * trying to tx (during RFKILL, we're not trying to tx).
 145         */
 146        iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
 147}
 148
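/*
 * iwl_pcie_gen2_get_num_tbs - return the number of TBs already used in a TFD
 */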
 149static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
 150                                    struct iwl_tfh_tfd *tfd)
 151{
 152        return le16_to_cpu(tfd->num_tbs) & 0x1f;
 153}
 154
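/*
 * iwl_pcie_gen2_tfd_unmap - unmap a TFD's DMA buffers
 *
 * Unmaps every TB except the first one (the bi-directional DMA data) and
 * resets the TFD's TB count.
 */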
 155static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
 156                                    struct iwl_cmd_meta *meta,
 157                                    struct iwl_tfh_tfd *tfd)
 158{
 159        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 160        int i, num_tbs;
 161
 162        /* Sanity check on number of chunks */
 163        num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
 164
 165        if (num_tbs > trans_pcie->max_tbs) {
 166                IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
 167                return;
 168        }
 169
 170        /* first TB is never freed - it's the bidirectional DMA data */
 171        for (i = 1; i < num_tbs; i++) {
 172                if (meta->tbs & BIT(i))
 173                        dma_unmap_page(trans->dev,
 174                                       le64_to_cpu(tfd->tbs[i].addr),
 175                                       le16_to_cpu(tfd->tbs[i].tb_len),
 176                                       DMA_TO_DEVICE);
 177                else
 178                        dma_unmap_single(trans->dev,
 179                                         le64_to_cpu(tfd->tbs[i].addr),
 180                                         le16_to_cpu(tfd->tbs[i].tb_len),
 181                                         DMA_TO_DEVICE);
 182        }
 183
 184        tfd->num_tbs = 0;
 185}
 186
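/*
 * iwl_pcie_gen2_free_tfd - unmap the TFD at the queue's read pointer and
 * free the attached skb, if any
 */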
 187static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 188{
 189        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
 190         * idx is bounded by n_window
 191         */
 192        int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
 193
 194        lockdep_assert_held(&txq->lock);
 195
 196        iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
 197                                iwl_pcie_get_tfd(trans, txq, idx));
 198
 199        /* free SKB */
 200        if (txq->entries) {
 201                struct sk_buff *skb;
 202
 203                skb = txq->entries[idx].skb;
 204
 205                /* Can be called from irqs-disabled context
 206                 * If skb is not NULL, it means that the whole queue is being
 207                 * freed and that the queue is not empty - free the skb
 208                 */
 209                if (skb) {
 210                        iwl_op_mode_free_skb(trans->op_mode, skb);
 211                        txq->entries[idx].skb = NULL;
 212                }
 213        }
 214}
 215
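/*
 * iwl_pcie_gen2_set_tb - add a TB with the given DMA address and length
 * to the TFD; returns the index used, or -EINVAL if the TFD is full
 */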
 216static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
 217                                struct iwl_tfh_tfd *tfd, dma_addr_t addr,
 218                                u16 len)
 219{
 220        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 221        int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
 222        struct iwl_tfh_tb *tb;
 223
 224        if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
 225                return -EINVAL;
 226        tb = &tfd->tbs[idx];
 227
  228        /* Each TFD can point to a maximum of max_tbs Tx buffers */
 229        if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
 230                IWL_ERR(trans, "Error can not send more than %d chunks\n",
 231                        trans_pcie->max_tbs);
 232                return -EINVAL;
 233        }
 234
 235        put_unaligned_le64(addr, &tb->addr);
 236        tb->tb_len = cpu_to_le16(len);
 237
 238        tfd->num_tbs = cpu_to_le16(idx + 1);
 239
 240        return idx;
 241}
 242
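/*
 * iwl_pcie_gen2_build_amsdu - build the TBs of a GSO-based A-MSDU
 *
 * Splits the skb into MSS-sized subframes, builds each subframe's header
 * (DA/SA/length plus SNAP/IP/TCP via the TSO core) in a header page and
 * maps the headers and payload chunks into the TFD.
 */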
 243static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 244                                     struct sk_buff *skb,
 245                                     struct iwl_tfh_tfd *tfd, int start_len,
 246                                     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
 247{
 248#ifdef CONFIG_INET
 249        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 250        struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
 251        struct ieee80211_hdr *hdr = (void *)skb->data;
 252        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
 253        unsigned int mss = skb_shinfo(skb)->gso_size;
 254        u16 length, amsdu_pad;
 255        u8 *start_hdr;
 256        struct iwl_tso_hdr_page *hdr_page;
 257        struct page **page_ptr;
 258        struct tso_t tso;
 259
 260        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
 261                             &dev_cmd->hdr, start_len, 0);
 262
 263        ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
 264        snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
 265        total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
 266        amsdu_pad = 0;
 267
 268        /* total amount of header we may need for this A-MSDU */
 269        hdr_room = DIV_ROUND_UP(total_len, mss) *
 270                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
 271
  272        /* Our device supports 9 segments at most, so it will fit in 1 page */
 273        hdr_page = get_page_hdr(trans, hdr_room);
 274        if (!hdr_page)
 275                return -ENOMEM;
 276
 277        get_page(hdr_page->page);
 278        start_hdr = hdr_page->pos;
 279        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
 280        *page_ptr = hdr_page->page;
 281
 282        /*
  283         * Pull the ieee80211 header so that we can use the TSO core;
  284         * we will restore it for the tx_status flow.
 285         */
 286        skb_pull(skb, hdr_len);
 287
 288        /*
 289         * Remove the length of all the headers that we don't actually
 290         * have in the MPDU by themselves, but that we duplicate into
 291         * all the different MSDUs inside the A-MSDU.
 292         */
 293        le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
 294
 295        tso_start(skb, &tso);
 296
 297        while (total_len) {
 298                /* this is the data left for this subframe */
 299                unsigned int data_left = min_t(unsigned int, mss, total_len);
 300                struct sk_buff *csum_skb = NULL;
 301                unsigned int tb_len;
 302                dma_addr_t tb_phys;
 303                u8 *subf_hdrs_start = hdr_page->pos;
 304
 305                total_len -= data_left;
 306
 307                memset(hdr_page->pos, 0, amsdu_pad);
 308                hdr_page->pos += amsdu_pad;
 309                amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
 310                                  data_left)) & 0x3;
 311                ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
 312                hdr_page->pos += ETH_ALEN;
 313                ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
 314                hdr_page->pos += ETH_ALEN;
 315
 316                length = snap_ip_tcp_hdrlen + data_left;
 317                *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
 318                hdr_page->pos += sizeof(length);
 319
 320                /*
  321                 * This will copy the SNAP as well, which will be considered
  322                 * part of the MAC header.
 323                 */
 324                tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
 325
 326                hdr_page->pos += snap_ip_tcp_hdrlen;
 327
 328                tb_len = hdr_page->pos - start_hdr;
 329                tb_phys = dma_map_single(trans->dev, start_hdr,
 330                                         tb_len, DMA_TO_DEVICE);
 331                if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
 332                        dev_kfree_skb(csum_skb);
 333                        goto out_err;
 334                }
 335                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
 336                trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
 337                /* add this subframe's headers' length to the tx_cmd */
 338                le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
 339
 340                /* prepare the start_hdr for the next subframe */
 341                start_hdr = hdr_page->pos;
 342
 343                /* put the payload */
 344                while (data_left) {
 345                        tb_len = min_t(unsigned int, tso.size, data_left);
 346                        tb_phys = dma_map_single(trans->dev, tso.data,
 347                                                 tb_len, DMA_TO_DEVICE);
 348                        if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
 349                                dev_kfree_skb(csum_skb);
 350                                goto out_err;
 351                        }
 352                        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
 353                        trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
 354                                                tb_len);
 355
 356                        data_left -= tb_len;
 357                        tso_build_data(skb, &tso, tb_len);
 358                }
 359        }
 360
  361        /* re-add the WiFi header */
 362        skb_push(skb, hdr_len);
 363
 364        return 0;
 365
 366out_err:
 367#endif
 368        return -EINVAL;
 369}
 370
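/*
 * iwl_pcie_gen2_build_tx_amsdu - build a TFD for a GSO A-MSDU frame
 *
 * Sets up TB0 (bi-directional data) and TB1 (rest of the TX command plus
 * the 802.11 header), then hands the payload to iwl_pcie_gen2_build_amsdu().
 */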
 371static struct
 372iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
 373                                          struct iwl_txq *txq,
 374                                          struct iwl_device_cmd *dev_cmd,
 375                                          struct sk_buff *skb,
 376                                          struct iwl_cmd_meta *out_meta,
 377                                          int hdr_len,
 378                                          int tx_cmd_len)
 379{
 380        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 381        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 382        dma_addr_t tb_phys;
 383        int len;
 384        void *tb1_addr;
 385
 386        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 387
 388        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 389
 390        /*
 391         * The second TB (tb1) points to the remainder of the TX command
 392         * and the 802.11 header - dword aligned size
 393         * (This calculation modifies the TX command, so do it before the
 394         * setup of the first TB)
 395         */
 396        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
 397              IWL_FIRST_TB_SIZE;
 398
 399        /* do not align A-MSDU to dword as the subframe header aligns it */
 400
 401        /* map the data for TB1 */
 402        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
 403        tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
 404        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 405                goto out_err;
 406        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
 407
 408        if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
 409                                      len + IWL_FIRST_TB_SIZE,
 410                                      hdr_len, dev_cmd))
 411                goto out_err;
 412
 413        /* building the A-MSDU might have changed this data, memcpy it now */
 414        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
 415        return tfd;
 416
 417out_err:
 418        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
 419        return NULL;
 420}
 421
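/*
 * iwl_pcie_gen2_tx_add_frags - map the skb's paged fragments, adding one TB
 * per non-empty fragment and recording each TB in out_meta->tbs
 */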
 422static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
 423                                      struct sk_buff *skb,
 424                                      struct iwl_tfh_tfd *tfd,
 425                                      struct iwl_cmd_meta *out_meta)
 426{
 427        int i;
 428
 429        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 430                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 431                dma_addr_t tb_phys;
 432                int tb_idx;
 433
 434                if (!skb_frag_size(frag))
 435                        continue;
 436
 437                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
 438                                           skb_frag_size(frag), DMA_TO_DEVICE);
 439
 440                if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 441                        return -ENOMEM;
 442                tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
 443                                              skb_frag_size(frag));
 444                trace_iwlwifi_dev_tx_tb(trans->dev, skb,
 445                                        skb_frag_address(frag),
 446                                        skb_frag_size(frag));
 447                if (tb_idx < 0)
 448                        return tb_idx;
 449
 450                out_meta->tbs |= BIT(tb_idx);
 451        }
 452
 453        return 0;
 454}
 455
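/*
 * iwl_pcie_gen2_build_tx - build a TFD for a regular (non-GSO) frame
 *
 * TB0 holds the bi-directional data, TB1 the rest of the TX command and the
 * 802.11 header, TB2 the remainder of the skb head (if any), and further TBs
 * the paged fragments.
 */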
 456static struct
 457iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 458                                    struct iwl_txq *txq,
 459                                    struct iwl_device_cmd *dev_cmd,
 460                                    struct sk_buff *skb,
 461                                    struct iwl_cmd_meta *out_meta,
 462                                    int hdr_len,
 463                                    int tx_cmd_len,
 464                                    bool pad)
 465{
 466        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 467        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 468        dma_addr_t tb_phys;
 469        int len, tb1_len, tb2_len;
 470        void *tb1_addr;
 471
 472        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 473
 474        /* The first TB points to bi-directional DMA data */
 475        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
 476
 477        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 478
 479        /*
 480         * The second TB (tb1) points to the remainder of the TX command
 481         * and the 802.11 header - dword aligned size
 482         * (This calculation modifies the TX command, so do it before the
 483         * setup of the first TB)
 484         */
 485        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
 486              IWL_FIRST_TB_SIZE;
 487
 488        if (pad)
 489                tb1_len = ALIGN(len, 4);
 490        else
 491                tb1_len = len;
 492
 493        /* map the data for TB1 */
 494        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
 495        tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
 496        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 497                goto out_err;
 498        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
 499        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
 500                             IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
 501
 502        /* set up TFD's third entry to point to remainder of skb's head */
 503        tb2_len = skb_headlen(skb) - hdr_len;
 504
 505        if (tb2_len > 0) {
 506                tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
 507                                         tb2_len, DMA_TO_DEVICE);
 508                if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 509                        goto out_err;
 510                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
 511                trace_iwlwifi_dev_tx_tb(trans->dev, skb,
 512                                        skb->data + hdr_len,
 513                                        tb2_len);
 514        }
 515
 516        if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
 517                goto out_err;
 518
 519        return tfd;
 520
 521out_err:
 522        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
 523        return NULL;
 524}
 525
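/*
 * iwl_pcie_gen2_build_tfd - build a TFD for an skb, dispatching to the
 * A-MSDU path for GSO frames and to the regular path otherwise
 */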
 526static
 527struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 528                                            struct iwl_txq *txq,
 529                                            struct iwl_device_cmd *dev_cmd,
 530                                            struct sk_buff *skb,
 531                                            struct iwl_cmd_meta *out_meta)
 532{
 533        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 534        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 535        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 536        int len, hdr_len;
 537        bool amsdu;
 538
 539        /* There must be data left over for TB1 or this code must be changed */
 540        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
 541
 542        memset(tfd, 0, sizeof(*tfd));
 543
 544        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22560)
 545                len = sizeof(struct iwl_tx_cmd_gen2);
 546        else
 547                len = sizeof(struct iwl_tx_cmd_gen3);
 548
 549        amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
 550                        (*ieee80211_get_qos_ctl(hdr) &
 551                         IEEE80211_QOS_CTL_A_MSDU_PRESENT);
 552
 553        hdr_len = ieee80211_hdrlen(hdr->frame_control);
 554
 555        /*
 556         * Only build A-MSDUs here if doing so by GSO, otherwise it may be
 557         * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
 558         * built in the higher layers already.
 559         */
 560        if (amsdu && skb_shinfo(skb)->gso_size)
 561                return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
 562                                                    out_meta, hdr_len, len);
 563
 564        return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
 565                                      hdr_len, len, !amsdu);
 566}
 567
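/*
 * iwl_trans_pcie_gen2_tx - queue an skb for transmission on a gen2 TX queue
 *
 * Builds the TFD, updates the byte-count table and bumps the write pointer.
 * If there is not enough room on the ring, the frame is parked on the
 * overflow queue instead.
 */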
 568int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 569                           struct iwl_device_cmd *dev_cmd, int txq_id)
 570{
 571        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 572        struct iwl_cmd_meta *out_meta;
 573        struct iwl_txq *txq = trans_pcie->txq[txq_id];
 574        u16 cmd_len;
 575        int idx;
 576        void *tfd;
 577
 578        if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
 579                      "TX on unused queue %d\n", txq_id))
 580                return -EINVAL;
 581
 582        if (skb_is_nonlinear(skb) &&
 583            skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
 584            __skb_linearize(skb))
 585                return -ENOMEM;
 586
 587        spin_lock(&txq->lock);
 588
 589        if (iwl_queue_space(trans, txq) < txq->high_mark) {
 590                iwl_stop_queue(trans, txq);
 591
  592                /* don't put the packet on the ring if there is no room */
 593                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
 594                        struct iwl_device_cmd **dev_cmd_ptr;
 595
 596                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
 597                                               trans_pcie->dev_cmd_offs);
 598
 599                        *dev_cmd_ptr = dev_cmd;
 600                        __skb_queue_tail(&txq->overflow_q, skb);
 601                        spin_unlock(&txq->lock);
 602                        return 0;
 603                }
 604        }
 605
 606        idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 607
 608        /* Set up driver data for this TFD */
 609        txq->entries[idx].skb = skb;
 610        txq->entries[idx].cmd = dev_cmd;
 611
 612        dev_cmd->hdr.sequence =
 613                cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
 614                            INDEX_TO_SEQ(idx)));
 615
 616        /* Set up first empty entry in queue's array of Tx/cmd buffers */
 617        out_meta = &txq->entries[idx].meta;
 618        out_meta->flags = 0;
 619
 620        tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
 621        if (!tfd) {
 622                spin_unlock(&txq->lock);
 623                return -1;
 624        }
 625
 626        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
 627                struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
 628                        (void *)dev_cmd->payload;
 629
 630                cmd_len = le16_to_cpu(tx_cmd_gen3->len);
 631        } else {
 632                struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
 633                        (void *)dev_cmd->payload;
 634
 635                cmd_len = le16_to_cpu(tx_cmd_gen2->len);
 636        }
 637
 638        /* Set up entry for this TFD in Tx byte-count array */
 639        iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
 640                                      iwl_pcie_gen2_get_num_tbs(trans, tfd));
 641
 642        /* start timer if queue currently empty */
 643        if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
 644                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 645
 646        /* Tell device the write index *just past* this latest filled TFD */
 647        txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 648        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
 649        /*
 650         * At this point the frame is "transmitted" successfully
 651         * and we will get a TX status notification eventually.
 652         */
 653        spin_unlock(&txq->lock);
 654        return 0;
 655}
 656
 657/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
 658
 659/*
 660 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
  661 * @trans: the transport
 662 * @cmd: a pointer to the ucode command structure
 663 *
 664 * The function returns < 0 values to indicate the operation
  665 * failed. On success, it returns the index (>= 0) of the command in the
 666 * command queue.
 667 */
 668static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 669                                      struct iwl_host_cmd *cmd)
 670{
 671        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 672        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
 673        struct iwl_device_cmd *out_cmd;
 674        struct iwl_cmd_meta *out_meta;
 675        unsigned long flags;
 676        void *dup_buf = NULL;
 677        dma_addr_t phys_addr;
 678        int i, cmd_pos, idx;
 679        u16 copy_size, cmd_size, tb0_size;
 680        bool had_nocopy = false;
 681        u8 group_id = iwl_cmd_groupid(cmd->id);
 682        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
 683        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 684        struct iwl_tfh_tfd *tfd;
 685
 686        copy_size = sizeof(struct iwl_cmd_header_wide);
 687        cmd_size = sizeof(struct iwl_cmd_header_wide);
 688
 689        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 690                cmddata[i] = cmd->data[i];
 691                cmdlen[i] = cmd->len[i];
 692
 693                if (!cmd->len[i])
 694                        continue;
 695
 696                /* need at least IWL_FIRST_TB_SIZE copied */
 697                if (copy_size < IWL_FIRST_TB_SIZE) {
 698                        int copy = IWL_FIRST_TB_SIZE - copy_size;
 699
 700                        if (copy > cmdlen[i])
 701                                copy = cmdlen[i];
 702                        cmdlen[i] -= copy;
 703                        cmddata[i] += copy;
 704                        copy_size += copy;
 705                }
 706
 707                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
 708                        had_nocopy = true;
 709                        if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
 710                                idx = -EINVAL;
 711                                goto free_dup_buf;
 712                        }
 713                } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
 714                        /*
 715                         * This is also a chunk that isn't copied
 716                         * to the static buffer so set had_nocopy.
 717                         */
 718                        had_nocopy = true;
 719
 720                        /* only allowed once */
 721                        if (WARN_ON(dup_buf)) {
 722                                idx = -EINVAL;
 723                                goto free_dup_buf;
 724                        }
 725
 726                        dup_buf = kmemdup(cmddata[i], cmdlen[i],
 727                                          GFP_ATOMIC);
 728                        if (!dup_buf)
 729                                return -ENOMEM;
 730                } else {
 731                        /* NOCOPY must not be followed by normal! */
 732                        if (WARN_ON(had_nocopy)) {
 733                                idx = -EINVAL;
 734                                goto free_dup_buf;
 735                        }
 736                        copy_size += cmdlen[i];
 737                }
 738                cmd_size += cmd->len[i];
 739        }
 740
 741        /*
 742         * If any of the command structures end up being larger than the
 743         * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
 744         * separate TFDs, then we will need to increase the size of the buffers
 745         */
 746        if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
 747                 "Command %s (%#x) is too large (%d bytes)\n",
 748                 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
 749                idx = -EINVAL;
 750                goto free_dup_buf;
 751        }
 752
 753        spin_lock_bh(&txq->lock);
 754
 755        idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 756        tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
 757        memset(tfd, 0, sizeof(*tfd));
 758
 759        if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 760                spin_unlock_bh(&txq->lock);
 761
 762                IWL_ERR(trans, "No space in command queue\n");
 763                iwl_op_mode_cmd_queue_full(trans->op_mode);
 764                idx = -ENOSPC;
 765                goto free_dup_buf;
 766        }
 767
 768        out_cmd = txq->entries[idx].cmd;
 769        out_meta = &txq->entries[idx].meta;
 770
 771        /* re-initialize to NULL */
 772        memset(out_meta, 0, sizeof(*out_meta));
 773        if (cmd->flags & CMD_WANT_SKB)
 774                out_meta->source = cmd;
 775
 776        /* set up the header */
 777        out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
 778        out_cmd->hdr_wide.group_id = group_id;
 779        out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
 780        out_cmd->hdr_wide.length =
 781                cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
 782        out_cmd->hdr_wide.reserved = 0;
 783        out_cmd->hdr_wide.sequence =
 784                cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
 785                                         INDEX_TO_SEQ(txq->write_ptr));
 786
 787        cmd_pos = sizeof(struct iwl_cmd_header_wide);
 788        copy_size = sizeof(struct iwl_cmd_header_wide);
 789
 790        /* and copy the data that needs to be copied */
 791        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 792                int copy;
 793
 794                if (!cmd->len[i])
 795                        continue;
 796
 797                /* copy everything if not nocopy/dup */
 798                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
 799                                           IWL_HCMD_DFL_DUP))) {
 800                        copy = cmd->len[i];
 801
 802                        memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
 803                        cmd_pos += copy;
 804                        copy_size += copy;
 805                        continue;
 806                }
 807
 808                /*
 809                 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
 810                 * in total (for bi-directional DMA), but copy up to what
 811                 * we can fit into the payload for debug dump purposes.
 812                 */
 813                copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
 814
 815                memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
 816                cmd_pos += copy;
 817
  818                /* However, treat copy_size the proper way; we need it below */
 819                if (copy_size < IWL_FIRST_TB_SIZE) {
 820                        copy = IWL_FIRST_TB_SIZE - copy_size;
 821
 822                        if (copy > cmd->len[i])
 823                                copy = cmd->len[i];
 824                        copy_size += copy;
 825                }
 826        }
 827
 828        IWL_DEBUG_HC(trans,
 829                     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
 830                     iwl_get_cmd_string(trans, cmd->id), group_id,
 831                     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
 832                     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
 833
 834        /* start the TFD with the minimum copy bytes */
 835        tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
 836        memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
 837        iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
 838                             tb0_size);
 839
 840        /* map first command fragment, if any remains */
 841        if (copy_size > tb0_size) {
 842                phys_addr = dma_map_single(trans->dev,
 843                                           (u8 *)out_cmd + tb0_size,
 844                                           copy_size - tb0_size,
 845                                           DMA_TO_DEVICE);
 846                if (dma_mapping_error(trans->dev, phys_addr)) {
 847                        idx = -ENOMEM;
 848                        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
 849                        goto out;
 850                }
 851                iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
 852                                     copy_size - tb0_size);
 853        }
 854
 855        /* map the remaining (adjusted) nocopy/dup fragments */
 856        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 857                const void *data = cmddata[i];
 858
 859                if (!cmdlen[i])
 860                        continue;
 861                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
 862                                           IWL_HCMD_DFL_DUP)))
 863                        continue;
 864                if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
 865                        data = dup_buf;
 866                phys_addr = dma_map_single(trans->dev, (void *)data,
 867                                           cmdlen[i], DMA_TO_DEVICE);
 868                if (dma_mapping_error(trans->dev, phys_addr)) {
 869                        idx = -ENOMEM;
 870                        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
 871                        goto out;
 872                }
 873                iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
 874        }
 875
 876        BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
 877        out_meta->flags = cmd->flags;
 878        if (WARN_ON_ONCE(txq->entries[idx].free_buf))
 879                kzfree(txq->entries[idx].free_buf);
 880        txq->entries[idx].free_buf = dup_buf;
 881
 882        trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 883
 884        /* start timer if queue currently empty */
 885        if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
 886                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 887
 888        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
 889        /* Increment and update queue's write index */
 890        txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 891        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
 892        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
 893
 894out:
 895        spin_unlock_bh(&txq->lock);
 896free_dup_buf:
 897        if (idx < 0)
 898                kfree(dup_buf);
 899        return idx;
 900}
 901
 902#define HOST_COMPLETE_TIMEOUT   (2 * HZ)
 903
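/*
 * iwl_pcie_gen2_send_hcmd_sync - enqueue a host command and wait up to
 * HOST_COMPLETE_TIMEOUT for its completion notification
 */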
 904static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
 905                                        struct iwl_host_cmd *cmd)
 906{
 907        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 908        const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
 909        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
 910        int cmd_idx;
 911        int ret;
 912
 913        IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
 914
 915        if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
 916                                  &trans->status),
 917                 "Command %s: a command is already active!\n", cmd_str))
 918                return -EIO;
 919
 920        IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
 921
 922        cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
 923        if (cmd_idx < 0) {
 924                ret = cmd_idx;
 925                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 926                IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
 927                        cmd_str, ret);
 928                return ret;
 929        }
 930
 931        ret = wait_event_timeout(trans_pcie->wait_command_queue,
 932                                 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
 933                                           &trans->status),
 934                                 HOST_COMPLETE_TIMEOUT);
 935        if (!ret) {
 936                IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
 937                        cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 938
 939                IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
 940                        txq->read_ptr, txq->write_ptr);
 941
 942                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 943                IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
 944                               cmd_str);
 945                ret = -ETIMEDOUT;
 946
 947                iwl_trans_pcie_sync_nmi(trans);
 948                goto cancel;
 949        }
 950
 951        if (test_bit(STATUS_FW_ERROR, &trans->status)) {
 952                IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
 953                dump_stack();
 954                ret = -EIO;
 955                goto cancel;
 956        }
 957
 958        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
 959            test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
 960                IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
 961                ret = -ERFKILL;
 962                goto cancel;
 963        }
 964
 965        if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
 966                IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
 967                ret = -EIO;
 968                goto cancel;
 969        }
 970
 971        return 0;
 972
 973cancel:
 974        if (cmd->flags & CMD_WANT_SKB) {
 975                /*
 976                 * Cancel the CMD_WANT_SKB flag for the cmd in the
 977                 * TX cmd queue. Otherwise in case the cmd comes
 978                 * in later, it will possibly set an invalid
 979                 * address (cmd->meta.source).
 980                 */
 981                txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
 982        }
 983
 984        if (cmd->resp_pkt) {
 985                iwl_free_resp(cmd);
 986                cmd->resp_pkt = NULL;
 987        }
 988
 989        return ret;
 990}
 991
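/*
 * iwl_trans_pcie_gen2_send_hcmd - send a host command, either asynchronously
 * (enqueue only) or synchronously via iwl_pcie_gen2_send_hcmd_sync()
 */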
 992int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
 993                                  struct iwl_host_cmd *cmd)
 994{
 995        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
 996            test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
 997                IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
 998                                  cmd->id);
 999                return -ERFKILL;
1000        }
1001
1002        if (cmd->flags & CMD_ASYNC) {
1003                int ret;
1004
 1005                /* An asynchronous command cannot expect an SKB to be set. */
1006                if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1007                        return -EINVAL;
1008
1009                ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
1010                if (ret < 0) {
1011                        IWL_ERR(trans,
1012                                "Error sending %s: enqueue_hcmd failed: %d\n",
1013                                iwl_get_cmd_string(trans, cmd->id), ret);
1014                        return ret;
1015                }
1016                return 0;
1017        }
1018
1019        return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
1020}
1021
1022/*
1023 * iwl_pcie_gen2_txq_unmap -  Unmap any remaining DMA mappings and free skb's
1024 */
1025void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
1026{
1027        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1028        struct iwl_txq *txq = trans_pcie->txq[txq_id];
1029
1030        spin_lock_bh(&txq->lock);
1031        while (txq->write_ptr != txq->read_ptr) {
1032                IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
1033                                   txq_id, txq->read_ptr);
1034
1035                if (txq_id != trans_pcie->cmd_queue) {
1036                        int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
1037                        struct sk_buff *skb = txq->entries[idx].skb;
1038
1039                        if (WARN_ON_ONCE(!skb))
1040                                continue;
1041
1042                        iwl_pcie_free_tso_page(trans_pcie, skb);
1043                }
1044                iwl_pcie_gen2_free_tfd(trans, txq);
1045                txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
1046        }
1047
1048        while (!skb_queue_empty(&txq->overflow_q)) {
1049                struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
1050
1051                iwl_op_mode_free_skb(trans->op_mode, skb);
1052        }
1053
1054        spin_unlock_bh(&txq->lock);
1055
1056        /* just in case - this queue may have been stopped */
1057        iwl_wake_queue(trans, txq);
1058}
1059
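/*
 * iwl_pcie_gen2_txq_free_memory - free the TFD ring, the first-TB buffers,
 * the entries array, the byte-count table and the queue structure itself
 */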
1060void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
1061                                   struct iwl_txq *txq)
1062{
1063        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1064        struct device *dev = trans->dev;
1065
1066        /* De-alloc circular buffer of TFDs */
1067        if (txq->tfds) {
1068                dma_free_coherent(dev,
1069                                  trans_pcie->tfd_size * txq->n_window,
1070                                  txq->tfds, txq->dma_addr);
1071                dma_free_coherent(dev,
1072                                  sizeof(*txq->first_tb_bufs) * txq->n_window,
1073                                  txq->first_tb_bufs, txq->first_tb_dma);
1074        }
1075
1076        kfree(txq->entries);
1077        iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
1078        kfree(txq);
1079}
1080
1081/*
 1082 * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
1083 * @txq: Transmit queue to deallocate.
1084 *
1085 * Empty queue by removing and destroying all BD's.
1086 * Free all buffers.
1087 * 0-fill, but do not free "txq" descriptor structure.
1088 */
1089static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
1090{
1091        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1092        struct iwl_txq *txq = trans_pcie->txq[txq_id];
1093        int i;
1094
1095        if (WARN_ON(!txq))
1096                return;
1097
1098        iwl_pcie_gen2_txq_unmap(trans, txq_id);
1099
1100        /* De-alloc array of command/tx buffers */
1101        if (txq_id == trans_pcie->cmd_queue)
1102                for (i = 0; i < txq->n_window; i++) {
1103                        kzfree(txq->entries[i].cmd);
1104                        kzfree(txq->entries[i].free_buf);
1105                }
1106        del_timer_sync(&txq->stuck_timer);
1107
1108        iwl_pcie_gen2_txq_free_memory(trans, txq);
1109
1110        trans_pcie->txq[txq_id] = NULL;
1111
1112        clear_bit(txq_id, trans_pcie->queue_used);
1113}
1114
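/*
 * iwl_trans_pcie_dyn_txq_alloc_dma - allocate the DMA resources (byte-count
 * table and TFD ring) for a dynamically added TX queue and set its
 * watchdog timeout
 */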
1115int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
1116                                     struct iwl_txq **intxq, int size,
1117                                     unsigned int timeout)
1118{
 1119        struct iwl_txq *txq;
 1120        int ret;
 1121
 1122        txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1123        if (!txq)
1124                return -ENOMEM;
1125        ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
1126                                     (trans->trans_cfg->device_family >=
1127                                      IWL_DEVICE_FAMILY_22560) ?
1128                                     sizeof(struct iwl_gen3_bc_tbl) :
1129                                     sizeof(struct iwlagn_scd_bc_tbl));
1130        if (ret) {
1131                IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1132                kfree(txq);
1133                return -ENOMEM;
1134        }
1135
1136        ret = iwl_pcie_txq_alloc(trans, txq, size, false);
1137        if (ret) {
1138                IWL_ERR(trans, "Tx queue alloc failed\n");
1139                goto error;
1140        }
1141        ret = iwl_pcie_txq_init(trans, txq, size, false);
1142        if (ret) {
1143                IWL_ERR(trans, "Tx queue init failed\n");
1144                goto error;
1145        }
1146
1147        txq->wd_timeout = msecs_to_jiffies(timeout);
1148
1149        *intxq = txq;
1150        return 0;
1151
1152error:
1153        iwl_pcie_gen2_txq_free_memory(trans, txq);
1154        return ret;
1155}
1156
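/*
 * iwl_trans_pcie_txq_alloc_response - handle the firmware's response to a
 * TX queue allocation command and activate the queue it assigned
 */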
1157int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
1158                                      struct iwl_txq *txq,
1159                                      struct iwl_host_cmd *hcmd)
1160{
1161        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1162        struct iwl_tx_queue_cfg_rsp *rsp;
1163        int ret, qid;
1164        u32 wr_ptr;
1165
1166        if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1167                    sizeof(*rsp))) {
1168                ret = -EINVAL;
1169                goto error_free_resp;
1170        }
1171
1172        rsp = (void *)hcmd->resp_pkt->data;
1173        qid = le16_to_cpu(rsp->queue_number);
1174        wr_ptr = le16_to_cpu(rsp->write_pointer);
1175
1176        if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
1177                WARN_ONCE(1, "queue index %d unsupported", qid);
1178                ret = -EIO;
1179                goto error_free_resp;
1180        }
1181
1182        if (test_and_set_bit(qid, trans_pcie->queue_used)) {
1183                WARN_ONCE(1, "queue %d already used", qid);
1184                ret = -EIO;
1185                goto error_free_resp;
1186        }
1187
1188        txq->id = qid;
1189        trans_pcie->txq[qid] = txq;
1190        wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1191
1192        /* Place first TFD at index corresponding to start sequence number */
1193        txq->read_ptr = wr_ptr;
1194        txq->write_ptr = wr_ptr;
1195
1196        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1197
1198        iwl_free_resp(hcmd);
1199        return qid;
1200
1201error_free_resp:
1202        iwl_free_resp(hcmd);
1203        iwl_pcie_gen2_txq_free_memory(trans, txq);
1204        return ret;
1205}
1206
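/*
 * iwl_trans_pcie_dyn_txq_alloc - allocate a TX queue dynamically: set up its
 * DMA resources, then ask the firmware to enable it
 */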
1207int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
1208                                 __le16 flags, u8 sta_id, u8 tid,
1209                                 int cmd_id, int size,
1210                                 unsigned int timeout)
1211{
1212        struct iwl_txq *txq = NULL;
1213        struct iwl_tx_queue_cfg_cmd cmd = {
1214                .flags = flags,
1215                .sta_id = sta_id,
1216                .tid = tid,
1217        };
1218        struct iwl_host_cmd hcmd = {
1219                .id = cmd_id,
1220                .len = { sizeof(cmd) },
1221                .data = { &cmd, },
1222                .flags = CMD_WANT_SKB,
1223        };
1224        int ret;
1225
1226        ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
1227        if (ret)
1228                return ret;
1229
1230        cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
1231        cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1232        cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1233
1234        ret = iwl_trans_send_cmd(trans, &hcmd);
1235        if (ret)
1236                goto error;
1237
1238        return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);
1239
1240error:
1241        iwl_pcie_gen2_txq_free_memory(trans, txq);
1242        return ret;
1243}
1244
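/*
 * iwl_trans_pcie_dyn_txq_free - deactivate a dynamically allocated TX queue
 * and unmap any frames still on it
 */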
1245void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
1246{
1247        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1248
1249        /*
1250         * Upon HW Rfkill - we stop the device, and then stop the queues
1251         * in the op_mode. Just for the sake of the simplicity of the op_mode,
1252         * allow the op_mode to call txq_disable after it already called
1253         * stop_device.
1254         */
1255        if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
1256                WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1257                          "queue %d not used", queue);
1258                return;
1259        }
1260
1261        iwl_pcie_gen2_txq_unmap(trans, queue);
1262
1263        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1264}
1265
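/*
 * iwl_pcie_gen2_tx_free - unmap and free all TX queues
 */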
1266void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
1267{
1268        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1269        int i;
1270
1271        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1272
1273        /* Free all TX queues */
1274        for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
1275                if (!trans_pcie->txq[i])
1276                        continue;
1277
1278                iwl_pcie_gen2_txq_free(trans, i);
1279        }
1280}
1281
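/*
 * iwl_pcie_gen2_tx_init - allocate the queue structure if it doesn't exist
 * yet and (re)initialize the given TX queue
 */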
1282int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
1283{
1284        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1285        struct iwl_txq *queue;
1286        int ret;
1287
1288        /* alloc and init the tx queue */
1289        if (!trans_pcie->txq[txq_id]) {
1290                queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1291                if (!queue) {
1292                        IWL_ERR(trans, "Not enough memory for tx queue\n");
1293                        return -ENOMEM;
1294                }
1295                trans_pcie->txq[txq_id] = queue;
1296                ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
1297                if (ret) {
 1298                        IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1299                        goto error;
1300                }
1301        } else {
1302                queue = trans_pcie->txq[txq_id];
1303        }
1304
1305        ret = iwl_pcie_txq_init(trans, queue, queue_size,
1306                                (txq_id == trans_pcie->cmd_queue));
1307        if (ret) {
 1308                IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1309                goto error;
1310        }
1311        trans_pcie->txq[txq_id]->id = txq_id;
1312        set_bit(txq_id, trans_pcie->queue_used);
1313
1314        return 0;
1315
1316error:
1317        iwl_pcie_gen2_tx_free(trans);
1318        return ret;
1319}
1320
1321