linux/drivers/net/wireless/iwmc3200wifi/tx.c
/*
 * Intel Wireless Multicomm 3200 WiFi driver
 *
 * Copyright (C) 2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <ilw@linux.intel.com>
 * Samuel Ortiz <samuel.ortiz@intel.com>
 * Zhu Yi <yi.zhu@intel.com>
 *
 */

/*
 * iwm Tx theory of operation:
 *
 * 1) We receive an 802.3 frame from the stack
 * 2) We convert it to an 802.11 frame [iwm_xmit_frame]
 * 3) We queue it to its corresponding tx queue [iwm_xmit_frame]
 * 4) We schedule the tx worker. There is one worker per tx
 *    queue. [iwm_xmit_frame]
 * 5) The tx worker is scheduled
 * 6) We go through every queued skb on the tx queue, and for each
 *    and every one of them: [iwm_tx_worker]
 *    a) We check if we have enough Tx credits (see below for a Tx
 *       credits description) for the frame length. [iwm_tx_worker]
 *    b) If we do, we aggregate the Tx frame into a UDMA one, by
 *       concatenating one REPLY_TX command per Tx frame. [iwm_tx_worker]
 *    c) When we run out of credits, or when we reach the maximum
 *       concatenation size, we actually send the concatenated UDMA
 *       frame. [iwm_tx_worker]
 *
 * When we run out of Tx credits, the skbs pile up in the tx queue,
 * and eventually we will stop the netdev queue. [iwm_tx_worker]
 * The tx queue is emptied as we're getting new tx credits, by
 * scheduling the tx_worker. [iwm_tx_credit_inc]
 * The netdev queue is started again when we have enough tx credits,
 * and when our tx queue has some reasonable amount of space available
 * (i.e. half of the max size). [iwm_tx_worker]
 */
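/*
 * Note on the concatenation (derived from the code below, not from any
 * firmware documentation): the UDMA frame is assembled in txq->concat_buf.
 * Each REPLY_TX command is padded to a 16 byte boundary (ALIGN(cmdlen, 16)
 * in iwm_tx_worker) and the buffer is flushed through
 * iwm_tx_send_concat_packets() before it would grow past
 * IWM_HAL_CONCATENATE_BUF_SIZE.
 */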

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>

#include "iwm.h"
#include "debug.h"
#include "commands.h"
#include "hal.h"
#include "umac.h"
#include "bus.h"

#define IWM_UMAC_PAGE_ALLOC_WRAP 0xffff

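/*
 * Convert a byte count into a number of UMAC pages, rounding up.
 * Illustrative example (assuming, for the sake of the example, that
 * IWM_UMAC_PAGE_SIZE were 512): BYTES_TO_PAGES(1) == 1,
 * BYTES_TO_PAGES(512) == 1, BYTES_TO_PAGES(513) == 2.
 */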
#define BYTES_TO_PAGES(n)        (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \
                                 (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0))

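/*
 * The credit pool numbering includes the command queue's pool at index
 * IWM_TX_CMD_QUEUE, while the data tx queues (and their workers) are
 * numbered without it. These two macros convert between the two schemes.
 */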
#define pool_id_to_queue(id)     ((id < IWM_TX_CMD_QUEUE) ? id : id - 1)
#define queue_to_pool_id(q)      ((q < IWM_TX_CMD_QUEUE) ? q : q + 1)

/* must be called with tx_credit.lock held */
static int iwm_tx_credit_get(struct iwm_tx_credit *tx_credit, int id)
{
        struct pool_entry *pool = &tx_credit->pools[id];
        struct spool_entry *spool = &tx_credit->spools[pool->sid];
        int spool_pages;

        /* number of pages that can be taken from the spool by this pool */
        spool_pages = spool->max_pages - spool->alloc_pages +
                      max(pool->min_pages - pool->alloc_pages, 0);

        return min(pool->max_pages - pool->alloc_pages, spool_pages);
}

static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb)
{
        u32 npages = BYTES_TO_PAGES(nb);

        if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
                return true;

        set_bit(id, &iwm->tx_credit.full_pools_map);

        IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n",
                   pool_id_to_queue(id),
                   iwm_tx_credit_get(&iwm->tx_credit, id));

        return false;
}

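/*
 * Called from the UMAC Tx credit update notification path
 * (rx.c:iwm_ntf_tx_credit_update). total_freed_pages is the UMAC's running
 * count of pages freed for this pool; it wraps at IWM_UMAC_PAGE_ALLOC_WRAP.
 * Freed pages are given back to the pool (and to the spool for the part
 * above min_pages), and the tx worker of a pool that was marked full is
 * scheduled again once the pool allocation drops below half of max_pages.
 */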
void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages)
{
        struct pool_entry *pool;
        struct spool_entry *spool;
        int freed_pages;
        int queue;

        BUG_ON(id >= IWM_MACS_OUT_GROUPS);

        pool = &iwm->tx_credit.pools[id];
        spool = &iwm->tx_credit.spools[pool->sid];

        freed_pages = total_freed_pages - pool->total_freed_pages;
        IWM_DBG_TX(iwm, DBG, "Free %d pages for pool[%d]\n", freed_pages, id);

        if (!freed_pages) {
                IWM_DBG_TX(iwm, DBG, "No pages are freed by UMAC\n");
                return;
        } else if (freed_pages < 0)
                freed_pages += IWM_UMAC_PAGE_ALLOC_WRAP + 1;

        if (pool->alloc_pages > pool->min_pages) {
                int spool_pages = pool->alloc_pages - pool->min_pages;
                spool_pages = min(spool_pages, freed_pages);
                spool->alloc_pages -= spool_pages;
        }

        pool->alloc_pages -= freed_pages;
        pool->total_freed_pages = total_freed_pages;

        IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
                   "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
                   pool->total_freed_pages, pool->sid, spool->alloc_pages);

        if (test_bit(id, &iwm->tx_credit.full_pools_map) &&
            (pool->alloc_pages < pool->max_pages / 2)) {
                clear_bit(id, &iwm->tx_credit.full_pools_map);

                queue = pool_id_to_queue(id);

                IWM_DBG_TX(iwm, DBG, "LINK: start txq[%d], available "
                           "credit: %d\n", queue,
                           iwm_tx_credit_get(&iwm->tx_credit, id));
                queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
        }
}

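/*
 * Charge alloc_pages pages to the pool. Pages allocated beyond the pool's
 * pre-reserved min_pages are also charged to the shared spool. Must be
 * called with tx_credit.lock held (see iwm_tx_credit_alloc).
 */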
static void iwm_tx_credit_dec(struct iwm_priv *iwm, int id, int alloc_pages)
{
        struct pool_entry *pool;
        struct spool_entry *spool;
        int spool_pages;

        IWM_DBG_TX(iwm, DBG, "Allocate %d pages for pool[%d]\n",
                   alloc_pages, id);

        BUG_ON(id >= IWM_MACS_OUT_GROUPS);

        pool = &iwm->tx_credit.pools[id];
        spool = &iwm->tx_credit.spools[pool->sid];

        spool_pages = pool->alloc_pages + alloc_pages - pool->min_pages;

        if (pool->alloc_pages >= pool->min_pages)
                spool->alloc_pages += alloc_pages;
        else if (spool_pages > 0)
                spool->alloc_pages += spool_pages;

        pool->alloc_pages += alloc_pages;

        IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
                   "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
                   pool->total_freed_pages, pool->sid, spool->alloc_pages);
}

int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
{
        u32 npages = BYTES_TO_PAGES(nb);
        int ret = 0;

        spin_lock(&iwm->tx_credit.lock);

        if (!iwm_tx_credit_ok(iwm, id, nb)) {
                IWM_DBG_TX(iwm, DBG, "No credit available for pool[%d]\n", id);
                ret = -ENOSPC;
                goto out;
        }

        iwm_tx_credit_dec(iwm, id, npages);

 out:
        spin_unlock(&iwm->tx_credit.lock);
        return ret;
}

/*
 * Since we're on an SDIO or USB bus, we are not sharing memory
 * for storing the frames to be transmitted. The host needs to push
 * them upstream. As a consequence there needs to be a way for
 * the target to let us know if it can actually take more TX frames
 * or not. This is what Tx credits are for.
 *
 * For each Tx HW queue, we have a Tx pool, and then we have one
 * unique super pool (spool), which is actually a global pool of
 * all the UMAC pages.
 * For each Tx pool we have min_pages, max_pages and alloc_pages
 * fields. The alloc_pages field tracks the number of pages
 * currently allocated from the tx pool.
 * Here are the rules to check whether, given a tx frame, we have
 * enough tx credits for it:
 * 1) We translate the frame length into a number of UMAC pages.
 *    Let's call them n_pages.
 * 2) For the corresponding tx pool, we check if n_pages +
 *    pool->alloc_pages is higher than pool->min_pages. min_pages
 *    represents a set of pre-allocated pages on the tx pool. If
 *    that's the case, then we need to allocate those pages from
 *    the spool. We can do so until we reach spool->max_pages.
 * 3) Each tx pool is not allowed to allocate more than pool->max_pages
 *    from the spool, so once we're over min_pages, we can allocate
 *    pages from the spool, but not more than max_pages.
 *
 * When the tx code path needs to send a tx frame, it first checks
 * whether it has enough tx credits, following those rules. [iwm_tx_credit_get]
 * If it does, it updates the pool and spool counters and then
 * sends the frame. [iwm_tx_credit_alloc and iwm_tx_credit_dec]
 * On the other side, when the UMAC is done transmitting frames, it
 * will send a credit update notification to the host. This is when
 * the pool and spool counters get decreased. [iwm_tx_credit_inc,
 * called from rx.c:iwm_ntf_tx_credit_update]
 *
 */
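/*
 * Illustrative example (made-up numbers, not firmware values): with
 * pool->min_pages = 4, pool->max_pages = 8, pool->alloc_pages = 6,
 * spool->max_pages = 16 and spool->alloc_pages = 10, iwm_tx_credit_get()
 * returns min(8 - 6, 16 - 10 + max(4 - 6, 0)) = 2, i.e. this pool may take
 * at most 2 more pages before its queue has to be stopped.
 */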
void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
                              struct iwm_umac_notif_alive *alive)
{
        int i, sid, pool_pages;

        spin_lock(&iwm->tx_credit.lock);

        iwm->tx_credit.pool_nr = le16_to_cpu(alive->page_grp_count);
        iwm->tx_credit.full_pools_map = 0;
        memset(&iwm->tx_credit.spools[0], 0, sizeof(struct spool_entry));

        IWM_DBG_TX(iwm, DBG, "Pools number is %d\n", iwm->tx_credit.pool_nr);

        for (i = 0; i < iwm->tx_credit.pool_nr; i++) {
                __le32 page_grp_state = alive->page_grp_state[i];

                iwm->tx_credit.pools[i].id = GET_VAL32(page_grp_state,
                                UMAC_ALIVE_PAGE_STS_GRP_NUM);
                iwm->tx_credit.pools[i].sid = GET_VAL32(page_grp_state,
                                UMAC_ALIVE_PAGE_STS_SGRP_NUM);
                iwm->tx_credit.pools[i].min_pages = GET_VAL32(page_grp_state,
                                UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE);
                iwm->tx_credit.pools[i].max_pages = GET_VAL32(page_grp_state,
                                UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE);
                iwm->tx_credit.pools[i].alloc_pages = 0;
                iwm->tx_credit.pools[i].total_freed_pages = 0;

                sid = iwm->tx_credit.pools[i].sid;
                pool_pages = iwm->tx_credit.pools[i].min_pages;

                if (iwm->tx_credit.spools[sid].max_pages == 0) {
                        iwm->tx_credit.spools[sid].id = sid;
                        iwm->tx_credit.spools[sid].max_pages =
                                GET_VAL32(page_grp_state,
                                          UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE);
                        iwm->tx_credit.spools[sid].alloc_pages = 0;
                }

                iwm->tx_credit.spools[sid].alloc_pages += pool_pages;

                IWM_DBG_TX(iwm, DBG, "Pool idx: %d, id: %d, sid: %d, capacity "
                           "min: %d, max: %d, pool alloc: %d, total_free: %d, "
                           "super pool alloc: %d\n",
                           i, iwm->tx_credit.pools[i].id,
                           iwm->tx_credit.pools[i].sid,
                           iwm->tx_credit.pools[i].min_pages,
                           iwm->tx_credit.pools[i].max_pages,
                           iwm->tx_credit.pools[i].alloc_pages,
                           iwm->tx_credit.pools[i].total_freed_pages,
                           iwm->tx_credit.spools[sid].alloc_pages);
        }

        spin_unlock(&iwm->tx_credit.lock);
}

#define IWM_UDMA_HDR_LEN        sizeof(struct iwm_umac_wifi_out_hdr)

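/*
 * Build one entry of the concatenated Tx buffer at 'buf': an
 * iwm_umac_wifi_out_hdr (UDMA header + UMAC REPLY_TX header) immediately
 * followed by a copy of the 802.11 frame carried by the skb. EOP is left
 * cleared here; it is set on the last entry right before the concatenated
 * buffer is sent out.
 */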
static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
                               int pool_id, u8 *buf)
{
        struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
        struct iwm_udma_wifi_cmd udma_cmd;
        struct iwm_umac_cmd umac_cmd;
        struct iwm_tx_info *tx_info = skb_to_tx_info(skb);

        udma_cmd.count = cpu_to_le16(skb->len +
                                     sizeof(struct iwm_umac_fw_cmd_hdr));
        /* set EOP to 0 here. iwm_udma_wifi_hdr_set_eop() will be
         * called later to set EOP for the last packet. */
        udma_cmd.eop = 0;
        udma_cmd.credit_group = pool_id;
        udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
        udma_cmd.lmac_offset = 0;

        umac_cmd.id = REPLY_TX;
        umac_cmd.count = cpu_to_le16(skb->len);
        umac_cmd.color = tx_info->color;
        umac_cmd.resp = 0;
        umac_cmd.seq_num = cpu_to_le16(iwm_alloc_wifi_cmd_seq(iwm));

        iwm_build_udma_wifi_hdr(iwm, &hdr->hw_hdr, &udma_cmd);
        iwm_build_umac_hdr(iwm, &hdr->sw_hdr, &umac_cmd);

        memcpy(buf + sizeof(*hdr), skb->data, skb->len);

        return 0;
}

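/*
 * Flush the per-queue concatenation buffer: set EOP on the last entry and
 * push the whole buffer to the bus layer as a single chunk.
 */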
static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
                                      struct iwm_tx_queue *txq)
{
        int ret;

        if (!txq->concat_count)
                return 0;

        IWM_DBG_TX(iwm, DBG, "Send concatenated Tx: queue %d, %d bytes\n",
                   txq->id, txq->concat_count);

        /* mark EOP for the last packet */
        iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);

        ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);

        txq->concat_count = 0;
        txq->concat_ptr = txq->concat_buf;

        return ret;
}

#define CONFIG_IWM_TX_CONCATENATED 1

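/*
 * Per tx queue worker: drains txq->queue while Tx credits are available,
 * concatenating each frame into txq->concat_buf, flushing the buffer when
 * the next frame would not fit and once the loop stops (queue empty or out
 * of credits). The netdev subqueue is woken up again once credits are back
 * and the queue is less than half full.
 */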
void iwm_tx_worker(struct work_struct *work)
{
        struct iwm_priv *iwm;
        struct iwm_tx_info *tx_info = NULL;
        struct sk_buff *skb;
        int cmdlen, ret;
        struct iwm_tx_queue *txq;
        int pool_id;

        txq = container_of(work, struct iwm_tx_queue, worker);
        iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

        pool_id = queue_to_pool_id(txq->id);

        while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
               !skb_queue_empty(&txq->queue)) {

                skb = skb_dequeue(&txq->queue);
                tx_info = skb_to_tx_info(skb);
                cmdlen = IWM_UDMA_HDR_LEN + skb->len;

                IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
                           "%d, color: %d\n", txq->id, skb, tx_info->sta,
                           tx_info->color);

#if !CONFIG_IWM_TX_CONCATENATED
                /* temporarily keep this for comparing the performance */
                ret = iwm_send_packet(iwm, skb, pool_id);
#else

                if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
                        iwm_tx_send_concat_packets(iwm, txq);

                ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
                if (ret) {
                        IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
                                   "%d, Tx worker stopped\n", txq->id);
                        skb_queue_head(&txq->queue, skb);
                        break;
                }

                txq->concat_ptr = txq->concat_buf + txq->concat_count;
                iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
                txq->concat_count += ALIGN(cmdlen, 16);
#endif
                kfree_skb(skb);
        }

        iwm_tx_send_concat_packets(iwm, txq);

        if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
            !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
            (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
                IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]\n",
                           txq->id);
                netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
        }
}

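/*
 * ndo_start_xmit handler: convert the 802.3 frame to 802.11, resolve the
 * destination station, tag the skb with sta/color/tid, queue it on the
 * corresponding tx queue and schedule that queue's worker.
 */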
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct iwm_priv *iwm = ndev_to_iwm(netdev);
        struct net_device *ndev = iwm_to_ndev(iwm);
        struct wireless_dev *wdev = iwm_to_wdev(iwm);
        u8 *dst_addr;
        struct iwm_tx_info *tx_info;
        struct iwm_tx_queue *txq;
        struct iwm_sta_info *sta_info;
        u8 sta_id;
        u16 queue;
        int ret;

        if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
                IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
                           "not associated\n");
                netif_tx_stop_all_queues(netdev);
                goto drop;
        }

        queue = skb_get_queue_mapping(skb);
        BUG_ON(queue >= IWM_TX_DATA_QUEUES); /* no iPAN yet */

        txq = &iwm->txq[queue];

        /* No free space for Tx, tx_worker is too slow */
        if (skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) {
                IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
                netif_stop_subqueue(netdev, queue);
                return NETDEV_TX_BUSY;
        }

        ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
                                       iwm->bssid, 0);
        if (ret) {
                IWM_ERR(iwm, "build wifi header failed\n");
                goto drop;
        }

        dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;

        for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
                sta_info = &iwm->sta_table[sta_id];
                if (sta_info->valid &&
                    !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
                        break;
        }

        if (sta_id == IWM_STA_TABLE_NUM) {
                IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
                        dst_addr);
                goto drop;
        }

        tx_info = skb_to_tx_info(skb);
        tx_info->sta = sta_id;
        tx_info->color = sta_info->color;
        /* UMAC uses TID 8 (vs. 0) for non QoS packets */
        if (sta_info->qos)
                tx_info->tid = skb->priority;
        else
                tx_info->tid = IWM_UMAC_MGMT_TID;

        /* Update the stats before queuing the skb: the tx worker may
         * dequeue and free it before we get a chance to touch it again. */
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += skb->len;

        skb_queue_tail(&iwm->txq[queue].queue, skb);

        queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);

        return NETDEV_TX_OK;

 drop:
        ndev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}