linux/drivers/net/wireless/rt2x00/rt2x00queue.c
   1/*
   2        Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
   3        Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
   4        Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
   5        <http://rt2x00.serialmonkey.com>
   6
   7        This program is free software; you can redistribute it and/or modify
   8        it under the terms of the GNU General Public License as published by
   9        the Free Software Foundation; either version 2 of the License, or
  10        (at your option) any later version.
  11
  12        This program is distributed in the hope that it will be useful,
  13        but WITHOUT ANY WARRANTY; without even the implied warranty of
  14        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15        GNU General Public License for more details.
  16
  17        You should have received a copy of the GNU General Public License
  18        along with this program; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21/*
  22        Module: rt2x00lib
  23        Abstract: rt2x00 queue specific routines.
  24 */
  25
  26#include <linux/slab.h>
  27#include <linux/kernel.h>
  28#include <linux/module.h>
  29#include <linux/dma-mapping.h>
  30
  31#include "rt2x00.h"
  32#include "rt2x00lib.h"
  33
  34struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
  35{
  36        struct data_queue *queue = entry->queue;
  37        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
  38        struct sk_buff *skb;
  39        struct skb_frame_desc *skbdesc;
  40        unsigned int frame_size;
  41        unsigned int head_size = 0;
  42        unsigned int tail_size = 0;
  43
  44        /*
  45         * The frame size includes descriptor size, because the
   46         * hardware directly receives the frame into the skbuffer.
  47         */
  48        frame_size = queue->data_size + queue->desc_size + queue->winfo_size;
  49
  50        /*
   51         * The payload should be aligned to a 4-byte boundary;
   52         * this means we need at least 3 bytes for moving the frame
   53         * to the correct offset.
  54         */
  55        head_size = 4;
  56
  57        /*
  58         * For IV/EIV/ICV assembly we must make sure there is
   59         * at least 8 bytes available in headroom for IV/EIV
   60         * and 8 bytes for ICV data as tailroom.
  61         */
  62        if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
  63                head_size += 8;
  64                tail_size += 8;
  65        }
  66
  67        /*
  68         * Allocate skbuffer.
  69         */
  70        skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
  71        if (!skb)
  72                return NULL;
  73
  74        /*
   75         * Make sure we now have a frame with the requested bytes
   76         * of headroom and tailroom available.
  77         */
  78        skb_reserve(skb, head_size);
  79        skb_put(skb, frame_size);
  80
  81        /*
  82         * Populate skbdesc.
  83         */
  84        skbdesc = get_skb_frame_desc(skb);
  85        memset(skbdesc, 0, sizeof(*skbdesc));
  86        skbdesc->entry = entry;
  87
  88        if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
  89                dma_addr_t skb_dma;
  90
  91                skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
  92                                         DMA_FROM_DEVICE);
  93                if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
  94                        dev_kfree_skb_any(skb);
  95                        return NULL;
  96                }
  97
  98                skbdesc->skb_dma = skb_dma;
  99                skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
 100        }
 101
 102        return skb;
 103}
 104
 105int rt2x00queue_map_txskb(struct queue_entry *entry)
 106{
 107        struct device *dev = entry->queue->rt2x00dev->dev;
 108        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 109
 110        skbdesc->skb_dma =
 111            dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
 112
 113        if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
 114                return -ENOMEM;
 115
 116        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
 117        return 0;
 118}
 119EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
 120
 121void rt2x00queue_unmap_skb(struct queue_entry *entry)
 122{
 123        struct device *dev = entry->queue->rt2x00dev->dev;
 124        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 125
 126        if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
 127                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
 128                                 DMA_FROM_DEVICE);
 129                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
 130        } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
 131                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
 132                                 DMA_TO_DEVICE);
 133                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
 134        }
 135}
 136EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
 137
 138void rt2x00queue_free_skb(struct queue_entry *entry)
 139{
 140        if (!entry->skb)
 141                return;
 142
 143        rt2x00queue_unmap_skb(entry);
 144        dev_kfree_skb_any(entry->skb);
 145        entry->skb = NULL;
 146}
 147
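     /*
      * Align the frame payload to a 4-byte boundary: shift the whole frame
      * into the headroom so that skb->data becomes aligned, then trim the
      * leftover bytes from the tail.
      */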
 148void rt2x00queue_align_frame(struct sk_buff *skb)
 149{
 150        unsigned int frame_length = skb->len;
 151        unsigned int align = ALIGN_SIZE(skb, 0);
 152
 153        if (!align)
 154                return;
 155
 156        skb_push(skb, align);
 157        memmove(skb->data, skb->data + align, frame_length);
 158        skb_trim(skb, frame_length);
 159}
 160
 161/*
  162 * H/W needs L2 padding between the header and the payload if the header size
  163 * is not 4-byte aligned.
 164 */
 165void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
 166{
 167        unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
 168
 169        if (!l2pad)
 170                return;
 171
 172        skb_push(skb, l2pad);
 173        memmove(skb->data, skb->data + l2pad, hdr_len);
 174}
 175
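     /*
      * Reverse the L2 padding: move the header back so it is contiguous
      * with the payload again and pull the pad bytes off the front.
      */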
 176void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
 177{
 178        unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
 179
 180        if (!l2pad)
 181                return;
 182
 183        memmove(skb->data + l2pad, skb->data, hdr_len);
 184        skb_pull(skb, l2pad);
 185}
 186
 187static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
 188                                                 struct sk_buff *skb,
 189                                                 struct txentry_desc *txdesc)
 190{
 191        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 192        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 193        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
 194        u16 seqno;
 195
 196        if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 197                return;
 198
 199        __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 200
 201        if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
 202                /*
  203                 * rt2800 has an H/W (or F/W) bug: the device incorrectly
  204                 * increases the seqno on retransmitted data (non-QOS) frames.
  205                 * To work around the problem, generate the seqno in software
  206                 * if QOS is disabled.
 207                 */
 208                if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
 209                        __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 210                else
 211                        /* H/W will generate sequence number */
 212                        return;
 213        }
 214
 215        /*
 216         * The hardware is not able to insert a sequence number. Assign a
 217         * software generated one here.
 218         *
 219         * This is wrong because beacons are not getting sequence
 220         * numbers assigned properly.
 221         *
 222         * A secondary problem exists for drivers that cannot toggle
 223         * sequence counting per-frame, since those will override the
 224         * sequence counter given by mac80211.
 225         */
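             /*
              * The sequence number occupies bits 4-15 of the seq_ctrl field
              * (IEEE80211_SCTL_SEQ), hence the increment in steps of 0x10.
              */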
 226        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
 227                seqno = atomic_add_return(0x10, &intf->seqno);
 228        else
 229                seqno = atomic_read(&intf->seqno);
 230
 231        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 232        hdr->seq_ctrl |= cpu_to_le16(seqno);
 233}
 234
 235static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
 236                                                  struct sk_buff *skb,
 237                                                  struct txentry_desc *txdesc,
 238                                                  const struct rt2x00_rate *hwrate)
 239{
 240        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 241        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 242        unsigned int data_length;
 243        unsigned int duration;
 244        unsigned int residual;
 245
 246        /*
  247         * Determine with what IFS priority this frame should be sent.
  248         * Set ifs to IFS_SIFS when this is not the first fragment,
  249         * or this fragment came after RTS/CTS.
 250         */
 251        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
 252                txdesc->u.plcp.ifs = IFS_BACKOFF;
 253        else
 254                txdesc->u.plcp.ifs = IFS_SIFS;
 255
 256        /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
 257        data_length = skb->len + 4;
 258        data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
 259
 260        /*
 261         * PLCP setup
 262         * Length calculation depends on OFDM/CCK rate.
 263         */
 264        txdesc->u.plcp.signal = hwrate->plcp;
 265        txdesc->u.plcp.service = 0x04;
 266
 267        if (hwrate->flags & DEV_RATE_OFDM) {
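                     /*
                      * For OFDM rates the PLCP LENGTH field carries the frame
                      * length in bytes, split into a high and a low 6-bit part.
                      */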
 268                txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
 269                txdesc->u.plcp.length_low = data_length & 0x3f;
 270        } else {
 271                /*
 272                 * Convert length to microseconds.
 273                 */
 274                residual = GET_DURATION_RES(data_length, hwrate->bitrate);
 275                duration = GET_DURATION(data_length, hwrate->bitrate);
 276
 277                if (residual != 0) {
 278                        duration++;
 279
 280                        /*
 281                         * Check if we need to set the Length Extension
 282                         */
 283                        if (hwrate->bitrate == 110 && residual <= 30)
 284                                txdesc->u.plcp.service |= 0x80;
 285                }
 286
 287                txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
 288                txdesc->u.plcp.length_low = duration & 0xff;
 289
 290                /*
  291                 * When short preamble is enabled we should set the
  292                 * preamble bit in the signal.
 293                 */
 294                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 295                        txdesc->u.plcp.signal |= 0x08;
 296        }
 297}
 298
 299static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
 300                                                struct sk_buff *skb,
 301                                                struct txentry_desc *txdesc,
 302                                                struct ieee80211_sta *sta,
 303                                                const struct rt2x00_rate *hwrate)
 304{
 305        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 306        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 307        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 308        struct rt2x00_sta *sta_priv = NULL;
 309
 310        if (sta) {
 311                txdesc->u.ht.mpdu_density =
 312                    sta->ht_cap.ampdu_density;
 313
 314                sta_priv = sta_to_rt2x00_sta(sta);
 315                txdesc->u.ht.wcid = sta_priv->wcid;
 316        }
 317
 318        /*
  319         * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
  320         * MCS rate to be used.
 321         */
 322        if (txrate->flags & IEEE80211_TX_RC_MCS) {
 323                txdesc->u.ht.mcs = txrate->idx;
 324
 325                /*
  326                 * MIMO PS should be set to 1 for STAs using dynamic SM PS
  327                 * when using more than one tx stream (>MCS7).
 328                 */
 329                if (sta && txdesc->u.ht.mcs > 7 &&
 330                    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
 331                        __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
 332        } else {
 333                txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
 334                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 335                        txdesc->u.ht.mcs |= 0x08;
 336        }
 337
 338        if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
 339                if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
 340                        txdesc->u.ht.txop = TXOP_SIFS;
 341                else
 342                        txdesc->u.ht.txop = TXOP_BACKOFF;
 343
  344                /* Leave all other settings zero. */
 345                return;
 346        }
 347
 348        txdesc->u.ht.ba_size = 7;       /* FIXME: What value is needed? */
 349
 350        /*
 351         * Only one STBC stream is supported for now.
 352         */
 353        if (tx_info->flags & IEEE80211_TX_CTL_STBC)
 354                txdesc->u.ht.stbc = 1;
 355
 356        /*
  357         * This frame is eligible for an AMPDU; however, don't aggregate
  358         * frames that are intended to probe a specific tx rate.
 359         */
 360        if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
 361            !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
 362                __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
 363
 364        /*
  365         * Set 40MHz mode if necessary (for legacy rates this will
 366         * duplicate the frame to both channels).
 367         */
 368        if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
 369            txrate->flags & IEEE80211_TX_RC_DUP_DATA)
 370                __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
 371        if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
 372                __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
 373
 374        /*
 375         * Determine IFS values
 376         * - Use TXOP_BACKOFF for management frames except beacons
 377         * - Use TXOP_SIFS for fragment bursts
 378         * - Use TXOP_HTTXOP for everything else
 379         *
 380         * Note: rt2800 devices won't use CTS protection (if used)
 381         * for frames not transmitted with TXOP_HTTXOP
 382         */
 383        if (ieee80211_is_mgmt(hdr->frame_control) &&
 384            !ieee80211_is_beacon(hdr->frame_control))
 385                txdesc->u.ht.txop = TXOP_BACKOFF;
 386        else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
 387                txdesc->u.ht.txop = TXOP_SIFS;
 388        else
 389                txdesc->u.ht.txop = TXOP_HTTXOP;
 390}
 391
 392static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 393                                             struct sk_buff *skb,
 394                                             struct txentry_desc *txdesc,
 395                                             struct ieee80211_sta *sta)
 396{
 397        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 398        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 399        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 400        struct ieee80211_rate *rate;
 401        const struct rt2x00_rate *hwrate = NULL;
 402
 403        memset(txdesc, 0, sizeof(*txdesc));
 404
 405        /*
 406         * Header and frame information.
 407         */
 408        txdesc->length = skb->len;
 409        txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);
 410
 411        /*
 412         * Check whether this frame is to be acked.
 413         */
 414        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
 415                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);
 416
 417        /*
 418         * Check if this is a RTS/CTS frame
 419         */
 420        if (ieee80211_is_rts(hdr->frame_control) ||
 421            ieee80211_is_cts(hdr->frame_control)) {
 422                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 423                if (ieee80211_is_rts(hdr->frame_control))
 424                        __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
 425                else
 426                        __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
 427                if (tx_info->control.rts_cts_rate_idx >= 0)
 428                        rate =
 429                            ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
 430        }
 431
 432        /*
 433         * Determine retry information.
 434         */
 435        txdesc->retry_limit = tx_info->control.rates[0].count - 1;
 436        if (txdesc->retry_limit >= rt2x00dev->long_retry)
 437                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
 438
 439        /*
 440         * Check if more fragments are pending
 441         */
 442        if (ieee80211_has_morefrags(hdr->frame_control)) {
 443                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 444                __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
 445        }
 446
 447        /*
 448         * Check if more frames (!= fragments) are pending
 449         */
 450        if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
 451                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 452
 453        /*
 454         * Beacons and probe responses require the tsf timestamp
 455         * to be inserted into the frame.
 456         */
 457        if (ieee80211_is_beacon(hdr->frame_control) ||
 458            ieee80211_is_probe_resp(hdr->frame_control))
 459                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
 460
 461        if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
 462            !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
 463                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
 464
 465        /*
 466         * Determine rate modulation.
 467         */
 468        if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 469                txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
 470        else if (txrate->flags & IEEE80211_TX_RC_MCS)
 471                txdesc->rate_mode = RATE_MODE_HT_MIX;
 472        else {
 473                rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
 474                hwrate = rt2x00_get_rate(rate->hw_value);
 475                if (hwrate->flags & DEV_RATE_OFDM)
 476                        txdesc->rate_mode = RATE_MODE_OFDM;
 477                else
 478                        txdesc->rate_mode = RATE_MODE_CCK;
 479        }
 480
 481        /*
 482         * Apply TX descriptor handling by components
 483         */
 484        rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
 485        rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
 486
 487        if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
 488                rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
 489                                                   sta, hwrate);
 490        else
 491                rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
 492                                                      hwrate);
 493}
 494
 495static int rt2x00queue_write_tx_data(struct queue_entry *entry,
 496                                     struct txentry_desc *txdesc)
 497{
 498        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 499
 500        /*
  501         * This should not happen; we already checked that the entry
  502         * was ours. If the hardware disagrees, the queue has been
  503         * corrupted!
 504         */
 505        if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
 506                     rt2x00dev->ops->lib->get_entry_state(entry))) {
 507                rt2x00_err(rt2x00dev,
 508                           "Corrupt queue %d, accessing entry which is not ours\n"
 509                           "Please file bug report to %s\n",
 510                           entry->queue->qid, DRV_PROJECT);
 511                return -EINVAL;
 512        }
 513
 514        /*
 515         * Add the requested extra tx headroom in front of the skb.
 516         */
 517        skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
 518        memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);
 519
 520        /*
 521         * Call the driver's write_tx_data function, if it exists.
 522         */
 523        if (rt2x00dev->ops->lib->write_tx_data)
 524                rt2x00dev->ops->lib->write_tx_data(entry, txdesc);
 525
 526        /*
 527         * Map the skb to DMA.
 528         */
 529        if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
 530            rt2x00queue_map_txskb(entry))
 531                return -ENOMEM;
 532
 533        return 0;
 534}
 535
 536static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
 537                                            struct txentry_desc *txdesc)
 538{
 539        struct data_queue *queue = entry->queue;
 540
 541        queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
 542
 543        /*
  544         * All processing on the frame has been completed; it is now
  545         * ready to be dumped to userspace through debugfs.
 546         */
 547        rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
 548}
 549
 550static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
 551                                      struct txentry_desc *txdesc)
 552{
 553        /*
  554         * Check if we need to kick the queue; there are however a few rules:
  555         *      1) Don't kick unless this is the last frame in a burst.
  556         *         When the burst flag is set, this frame is always followed
  557         *         by another frame which is in some way related to it.
  558         *         This is true for fragments, RTS or CTS-to-self frames.
  559         *      2) Rule 1 can be broken when the number of available entries
  560         *         in the queue drops below a certain threshold.
 561         */
 562        if (rt2x00queue_threshold(queue) ||
 563            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
 564                queue->rt2x00dev->ops->lib->kick_queue(queue);
 565}
 566
 567static void rt2x00queue_bar_check(struct queue_entry *entry)
 568{
 569        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 570        struct ieee80211_bar *bar = (void *) (entry->skb->data +
 571                                    rt2x00dev->extra_tx_headroom);
 572        struct rt2x00_bar_list_entry *bar_entry;
 573
 574        if (likely(!ieee80211_is_back_req(bar->frame_control)))
 575                return;
 576
 577        bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);
 578
 579        /*
  580         * If the allocation fails we still send the BAR out, but we don't
  581         * track it in our BAR list; as a result it will be reported back
  582         * to mac80211 as failed.
 583         */
 584        if (!bar_entry)
 585                return;
 586
 587        bar_entry->entry = entry;
 588        bar_entry->block_acked = 0;
 589
 590        /*
  591         * Copy the relevant parts of the 802.11 BAR into our check list
  592         * such that we can use RCU for low overhead in the RX path, since
  593         * sending BARs and processing the corresponding BlockAck should be
  594         * the exception.
 595         */
 596        memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
 597        memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
 598        bar_entry->control = bar->control;
 599        bar_entry->start_seq_num = bar->start_seq_num;
 600
 601        /*
 602         * Insert BAR into our BAR check list.
 603         */
 604        spin_lock_bh(&rt2x00dev->bar_list_lock);
 605        list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
 606        spin_unlock_bh(&rt2x00dev->bar_list_lock);
 607}
 608
 609int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 610                               struct ieee80211_sta *sta, bool local)
 611{
 612        struct ieee80211_tx_info *tx_info;
 613        struct queue_entry *entry;
 614        struct txentry_desc txdesc;
 615        struct skb_frame_desc *skbdesc;
 616        u8 rate_idx, rate_flags;
 617        int ret = 0;
 618
 619        /*
 620         * Copy all TX descriptor information into txdesc,
 621         * after that we are free to use the skb->cb array
 622         * for our information.
 623         */
 624        rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
 625
 626        /*
 627         * All information is retrieved from the skb->cb array,
 628         * now we should claim ownership of the driver part of that
 629         * array, preserving the bitrate index and flags.
 630         */
 631        tx_info = IEEE80211_SKB_CB(skb);
 632        rate_idx = tx_info->control.rates[0].idx;
 633        rate_flags = tx_info->control.rates[0].flags;
 634        skbdesc = get_skb_frame_desc(skb);
 635        memset(skbdesc, 0, sizeof(*skbdesc));
 636        skbdesc->tx_rate_idx = rate_idx;
 637        skbdesc->tx_rate_flags = rate_flags;
 638
 639        if (local)
 640                skbdesc->flags |= SKBDESC_NOT_MAC80211;
 641
 642        /*
 643         * When hardware encryption is supported, and this frame
 644         * is to be encrypted, we should strip the IV/EIV data from
 645         * the frame so we can provide it to the driver separately.
 646         */
 647        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 648            !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
 649                if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
 650                        rt2x00crypto_tx_copy_iv(skb, &txdesc);
 651                else
 652                        rt2x00crypto_tx_remove_iv(skb, &txdesc);
 653        }
 654
 655        /*
 656         * When DMA allocation is required we should guarantee to the
 657         * driver that the DMA is aligned to a 4-byte boundary.
 658         * However some drivers require L2 padding to pad the payload
  659         * rather than the header. This could be a requirement for
  660         * PCI and USB devices, while header alignment is only valid
 661         * for PCI devices.
 662         */
 663        if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
 664                rt2x00queue_insert_l2pad(skb, txdesc.header_length);
 665        else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
 666                rt2x00queue_align_frame(skb);
 667
 668        /*
  669         * This function must be called with bottom halves (bh) disabled.
 670         */
 671        spin_lock(&queue->tx_lock);
 672
 673        if (unlikely(rt2x00queue_full(queue))) {
 674                rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
 675                           queue->qid);
 676                ret = -ENOBUFS;
 677                goto out;
 678        }
 679
 680        entry = rt2x00queue_get_entry(queue, Q_INDEX);
 681
 682        if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
 683                                      &entry->flags))) {
 684                rt2x00_err(queue->rt2x00dev,
 685                           "Arrived at non-free entry in the non-full queue %d\n"
 686                           "Please file bug report to %s\n",
 687                           queue->qid, DRV_PROJECT);
 688                ret = -EINVAL;
 689                goto out;
 690        }
 691
 692        skbdesc->entry = entry;
 693        entry->skb = skb;
 694
 695        /*
  696         * It is possible that the queue was corrupted and this
 697         * call failed. Since we always return NETDEV_TX_OK to mac80211,
 698         * this frame will simply be dropped.
 699         */
 700        if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
 701                clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 702                entry->skb = NULL;
 703                ret = -EIO;
 704                goto out;
 705        }
 706
 707        /*
 708         * Put BlockAckReqs into our check list for driver BA processing.
 709         */
 710        rt2x00queue_bar_check(entry);
 711
 712        set_bit(ENTRY_DATA_PENDING, &entry->flags);
 713
 714        rt2x00queue_index_inc(entry, Q_INDEX);
 715        rt2x00queue_write_tx_descriptor(entry, &txdesc);
 716        rt2x00queue_kick_tx_queue(queue, &txdesc);
 717
 718out:
 719        spin_unlock(&queue->tx_lock);
 720        return ret;
 721}
 722
 723int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
 724                             struct ieee80211_vif *vif)
 725{
 726        struct rt2x00_intf *intf = vif_to_intf(vif);
 727
 728        if (unlikely(!intf->beacon))
 729                return -ENOBUFS;
 730
 731        /*
 732         * Clean up the beacon skb.
 733         */
 734        rt2x00queue_free_skb(intf->beacon);
 735
 736        /*
 737         * Clear beacon (single bssid devices don't need to clear the beacon
 738         * since the beacon queue will get stopped anyway).
 739         */
 740        if (rt2x00dev->ops->lib->clear_beacon)
 741                rt2x00dev->ops->lib->clear_beacon(intf->beacon);
 742
 743        return 0;
 744}
 745
 746int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 747                              struct ieee80211_vif *vif)
 748{
 749        struct rt2x00_intf *intf = vif_to_intf(vif);
 750        struct skb_frame_desc *skbdesc;
 751        struct txentry_desc txdesc;
 752
 753        if (unlikely(!intf->beacon))
 754                return -ENOBUFS;
 755
 756        /*
 757         * Clean up the beacon skb.
 758         */
 759        rt2x00queue_free_skb(intf->beacon);
 760
 761        intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
 762        if (!intf->beacon->skb)
 763                return -ENOMEM;
 764
 765        /*
 766         * Copy all TX descriptor information into txdesc,
 767         * after that we are free to use the skb->cb array
 768         * for our information.
 769         */
 770        rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
 771
 772        /*
 773         * Fill in skb descriptor
 774         */
 775        skbdesc = get_skb_frame_desc(intf->beacon->skb);
 776        memset(skbdesc, 0, sizeof(*skbdesc));
 777        skbdesc->entry = intf->beacon;
 778
 779        /*
 780         * Send beacon to hardware.
 781         */
 782        rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
 783
 784        return 0;
 785
 786}
 787
 788bool rt2x00queue_for_each_entry(struct data_queue *queue,
 789                                enum queue_index start,
 790                                enum queue_index end,
 791                                void *data,
 792                                bool (*fn)(struct queue_entry *entry,
 793                                           void *data))
 794{
 795        unsigned long irqflags;
 796        unsigned int index_start;
 797        unsigned int index_end;
 798        unsigned int i;
 799
 800        if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
 801                rt2x00_err(queue->rt2x00dev,
 802                           "Entry requested from invalid index range (%d - %d)\n",
 803                           start, end);
 804                return true;
 805        }
 806
 807        /*
 808         * Only protect the range we are going to loop over,
  809         * if during our loop an extra entry is set to pending
 810         * it should not be kicked during this run, since it
 811         * is part of another TX operation.
 812         */
 813        spin_lock_irqsave(&queue->index_lock, irqflags);
 814        index_start = queue->index[start];
 815        index_end = queue->index[end];
 816        spin_unlock_irqrestore(&queue->index_lock, irqflags);
 817
 818        /*
  819         * Start from the TX done pointer; this guarantees that we will
 820         * send out all frames in the correct order.
 821         */
 822        if (index_start < index_end) {
 823                for (i = index_start; i < index_end; i++) {
 824                        if (fn(&queue->entries[i], data))
 825                                return true;
 826                }
 827        } else {
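                     /*
                      * The index range wraps around the end of the entries
                      * array: walk to the end first, then from the start up
                      * to index_end.
                      */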
 828                for (i = index_start; i < queue->limit; i++) {
 829                        if (fn(&queue->entries[i], data))
 830                                return true;
 831                }
 832
 833                for (i = 0; i < index_end; i++) {
 834                        if (fn(&queue->entries[i], data))
 835                                return true;
 836                }
 837        }
 838
 839        return false;
 840}
 841EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
 842
 843struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
 844                                          enum queue_index index)
 845{
 846        struct queue_entry *entry;
 847        unsigned long irqflags;
 848
 849        if (unlikely(index >= Q_INDEX_MAX)) {
 850                rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
 851                           index);
 852                return NULL;
 853        }
 854
 855        spin_lock_irqsave(&queue->index_lock, irqflags);
 856
 857        entry = &queue->entries[queue->index[index]];
 858
 859        spin_unlock_irqrestore(&queue->index_lock, irqflags);
 860
 861        return entry;
 862}
 863EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
 864
 865void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
 866{
 867        struct data_queue *queue = entry->queue;
 868        unsigned long irqflags;
 869
 870        if (unlikely(index >= Q_INDEX_MAX)) {
 871                rt2x00_err(queue->rt2x00dev,
 872                           "Index change on invalid index type (%d)\n", index);
 873                return;
 874        }
 875
 876        spin_lock_irqsave(&queue->index_lock, irqflags);
 877
 878        queue->index[index]++;
 879        if (queue->index[index] >= queue->limit)
 880                queue->index[index] = 0;
 881
 882        entry->last_action = jiffies;
 883
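             /*
              * Q_INDEX advances when a new frame is queued, Q_INDEX_DONE when
              * a frame is handed back; the difference is the queue length.
              */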
 884        if (index == Q_INDEX) {
 885                queue->length++;
 886        } else if (index == Q_INDEX_DONE) {
 887                queue->length--;
 888                queue->count++;
 889        }
 890
 891        spin_unlock_irqrestore(&queue->index_lock, irqflags);
 892}
 893
 894static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
 895{
 896        switch (queue->qid) {
 897        case QID_AC_VO:
 898        case QID_AC_VI:
 899        case QID_AC_BE:
 900        case QID_AC_BK:
 901                /*
 902                 * For TX queues, we have to disable the queue
 903                 * inside mac80211.
 904                 */
 905                ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
 906                break;
 907        default:
 908                break;
 909        }
 910}
 911void rt2x00queue_pause_queue(struct data_queue *queue)
 912{
 913        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 914            !test_bit(QUEUE_STARTED, &queue->flags) ||
 915            test_and_set_bit(QUEUE_PAUSED, &queue->flags))
 916                return;
 917
 918        rt2x00queue_pause_queue_nocheck(queue);
 919}
 920EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
 921
 922void rt2x00queue_unpause_queue(struct data_queue *queue)
 923{
 924        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 925            !test_bit(QUEUE_STARTED, &queue->flags) ||
 926            !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
 927                return;
 928
 929        switch (queue->qid) {
 930        case QID_AC_VO:
 931        case QID_AC_VI:
 932        case QID_AC_BE:
 933        case QID_AC_BK:
 934                /*
 935                 * For TX queues, we have to enable the queue
 936                 * inside mac80211.
 937                 */
 938                ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
 939                break;
 940        case QID_RX:
 941                /*
 942                 * For RX we need to kick the queue now in order to
 943                 * receive frames.
 944                 */
 945                queue->rt2x00dev->ops->lib->kick_queue(queue);
 946        default:
 947                break;
 948        }
 949}
 950EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
 951
 952void rt2x00queue_start_queue(struct data_queue *queue)
 953{
 954        mutex_lock(&queue->status_lock);
 955
 956        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 957            test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
 958                mutex_unlock(&queue->status_lock);
 959                return;
 960        }
 961
 962        set_bit(QUEUE_PAUSED, &queue->flags);
 963
 964        queue->rt2x00dev->ops->lib->start_queue(queue);
 965
 966        rt2x00queue_unpause_queue(queue);
 967
 968        mutex_unlock(&queue->status_lock);
 969}
 970EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
 971
 972void rt2x00queue_stop_queue(struct data_queue *queue)
 973{
 974        mutex_lock(&queue->status_lock);
 975
 976        if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
 977                mutex_unlock(&queue->status_lock);
 978                return;
 979        }
 980
 981        rt2x00queue_pause_queue_nocheck(queue);
 982
 983        queue->rt2x00dev->ops->lib->stop_queue(queue);
 984
 985        mutex_unlock(&queue->status_lock);
 986}
 987EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
 988
 989void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
 990{
 991        bool tx_queue =
 992                (queue->qid == QID_AC_VO) ||
 993                (queue->qid == QID_AC_VI) ||
 994                (queue->qid == QID_AC_BE) ||
 995                (queue->qid == QID_AC_BK);
 996
 997
 998        /*
 999         * If we are not supposed to drop any pending
1000         * frames, this means we must force a start (=kick)
1001         * to the queue to make sure the hardware will
1002         * start transmitting.
1003         */
1004        if (!drop && tx_queue)
1005                queue->rt2x00dev->ops->lib->kick_queue(queue);
1006
1007        /*
1008         * Check if driver supports flushing, if that is the case we can
1009         * defer the flushing to the driver. Otherwise we must use the
1010         * alternative which just waits for the queue to become empty.
1011         */
1012        if (likely(queue->rt2x00dev->ops->lib->flush_queue))
1013                queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
1014
1015        /*
1016         * The queue flush has failed...
1017         */
1018        if (unlikely(!rt2x00queue_empty(queue)))
1019                rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
1020                            queue->qid);
1021}
1022EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
1023
1024void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
1025{
1026        struct data_queue *queue;
1027
1028        /*
1029         * rt2x00queue_start_queue will call ieee80211_wake_queue
 1030         * for each queue after it has been properly initialized.
1031         */
1032        tx_queue_for_each(rt2x00dev, queue)
1033                rt2x00queue_start_queue(queue);
1034
1035        rt2x00queue_start_queue(rt2x00dev->rx);
1036}
1037EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
1038
1039void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
1040{
1041        struct data_queue *queue;
1042
1043        /*
1044         * rt2x00queue_stop_queue will call ieee80211_stop_queue
 1045         * as well, but we are completely shutting everything down
1046         * now, so it is much safer to stop all TX queues at once,
1047         * and use rt2x00queue_stop_queue for cleaning up.
1048         */
1049        ieee80211_stop_queues(rt2x00dev->hw);
1050
1051        tx_queue_for_each(rt2x00dev, queue)
1052                rt2x00queue_stop_queue(queue);
1053
1054        rt2x00queue_stop_queue(rt2x00dev->rx);
1055}
1056EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
1057
1058void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
1059{
1060        struct data_queue *queue;
1061
1062        tx_queue_for_each(rt2x00dev, queue)
1063                rt2x00queue_flush_queue(queue, drop);
1064
1065        rt2x00queue_flush_queue(rt2x00dev->rx, drop);
1066}
1067EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
1068
1069static void rt2x00queue_reset(struct data_queue *queue)
1070{
1071        unsigned long irqflags;
1072        unsigned int i;
1073
1074        spin_lock_irqsave(&queue->index_lock, irqflags);
1075
1076        queue->count = 0;
1077        queue->length = 0;
1078
1079        for (i = 0; i < Q_INDEX_MAX; i++)
1080                queue->index[i] = 0;
1081
1082        spin_unlock_irqrestore(&queue->index_lock, irqflags);
1083}
1084
1085void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
1086{
1087        struct data_queue *queue;
1088        unsigned int i;
1089
1090        queue_for_each(rt2x00dev, queue) {
1091                rt2x00queue_reset(queue);
1092
1093                for (i = 0; i < queue->limit; i++)
1094                        rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
1095        }
1096}
1097
1098static int rt2x00queue_alloc_entries(struct data_queue *queue)
1099{
1100        struct queue_entry *entries;
1101        unsigned int entry_size;
1102        unsigned int i;
1103
1104        rt2x00queue_reset(queue);
1105
1106        /*
1107         * Allocate all queue entries.
1108         */
1109        entry_size = sizeof(*entries) + queue->priv_size;
1110        entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
1111        if (!entries)
1112                return -ENOMEM;
1113
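     /*
      * Each queue entry's driver private data lives in the same allocation:
      * the array of queue_entry structures is followed by queue->limit
      * blocks of queue->priv_size bytes, one per entry.
      */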
1114#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
1115        (((char *)(__base)) + ((__limit) * (__esize)) + \
1116            ((__index) * (__psize)))
1117
1118        for (i = 0; i < queue->limit; i++) {
1119                entries[i].flags = 0;
1120                entries[i].queue = queue;
1121                entries[i].skb = NULL;
1122                entries[i].entry_idx = i;
1123                entries[i].priv_data =
1124                    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
1125                                            sizeof(*entries), queue->priv_size);
1126        }
1127
1128#undef QUEUE_ENTRY_PRIV_OFFSET
1129
1130        queue->entries = entries;
1131
1132        return 0;
1133}
1134
1135static void rt2x00queue_free_skbs(struct data_queue *queue)
1136{
1137        unsigned int i;
1138
1139        if (!queue->entries)
1140                return;
1141
1142        for (i = 0; i < queue->limit; i++) {
1143                rt2x00queue_free_skb(&queue->entries[i]);
1144        }
1145}
1146
1147static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
1148{
1149        unsigned int i;
1150        struct sk_buff *skb;
1151
1152        for (i = 0; i < queue->limit; i++) {
1153                skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
1154                if (!skb)
1155                        return -ENOMEM;
1156                queue->entries[i].skb = skb;
1157        }
1158
1159        return 0;
1160}
1161
1162int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1163{
1164        struct data_queue *queue;
1165        int status;
1166
1167        status = rt2x00queue_alloc_entries(rt2x00dev->rx);
1168        if (status)
1169                goto exit;
1170
1171        tx_queue_for_each(rt2x00dev, queue) {
1172                status = rt2x00queue_alloc_entries(queue);
1173                if (status)
1174                        goto exit;
1175        }
1176
1177        status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
1178        if (status)
1179                goto exit;
1180
1181        if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
1182                status = rt2x00queue_alloc_entries(rt2x00dev->atim);
1183                if (status)
1184                        goto exit;
1185        }
1186
1187        status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
1188        if (status)
1189                goto exit;
1190
1191        return 0;
1192
1193exit:
1194        rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");
1195
1196        rt2x00queue_uninitialize(rt2x00dev);
1197
1198        return status;
1199}
1200
1201void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
1202{
1203        struct data_queue *queue;
1204
1205        rt2x00queue_free_skbs(rt2x00dev->rx);
1206
1207        queue_for_each(rt2x00dev, queue) {
1208                kfree(queue->entries);
1209                queue->entries = NULL;
1210        }
1211}
1212
1213static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
1214                             struct data_queue *queue, enum data_queue_qid qid)
1215{
1216        mutex_init(&queue->status_lock);
1217        spin_lock_init(&queue->tx_lock);
1218        spin_lock_init(&queue->index_lock);
1219
1220        queue->rt2x00dev = rt2x00dev;
1221        queue->qid = qid;
1222        queue->txop = 0;
1223        queue->aifs = 2;
1224        queue->cw_min = 5;
1225        queue->cw_max = 10;
1226
1227        rt2x00dev->ops->queue_init(queue);
1228
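             /*
              * Threshold of free entries below which the queue is kicked even
              * in the middle of a burst (see rt2x00queue_kick_tx_queue()).
              */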
1229        queue->threshold = DIV_ROUND_UP(queue->limit, 10);
1230}
1231
1232int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1233{
1234        struct data_queue *queue;
1235        enum data_queue_qid qid;
1236        unsigned int req_atim =
1237            !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
1238
1239        /*
1240         * We need the following queues:
1241         * RX: 1
1242         * TX: ops->tx_queues
1243         * Beacon: 1
1244         * Atim: 1 (if required)
1245         */
1246        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
1247
1248        queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
1249        if (!queue) {
1250                rt2x00_err(rt2x00dev, "Queue allocation failed\n");
1251                return -ENOMEM;
1252        }
1253
1254        /*
1255         * Initialize pointers
1256         */
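             /*
              * Layout within the single allocation: RX, the TX queues,
              * BCN and finally ATIM (if required).
              */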
1257        rt2x00dev->rx = queue;
1258        rt2x00dev->tx = &queue[1];
1259        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
1260        rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
1261
1262        /*
1263         * Initialize queue parameters.
1264         * RX: qid = QID_RX
1265         * TX: qid = QID_AC_VO + index
1266         * TX: cw_min: 2^5 = 32.
1267         * TX: cw_max: 2^10 = 1024.
1268         * BCN: qid = QID_BEACON
1269         * ATIM: qid = QID_ATIM
1270         */
1271        rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
1272
1273        qid = QID_AC_VO;
1274        tx_queue_for_each(rt2x00dev, queue)
1275                rt2x00queue_init(rt2x00dev, queue, qid++);
1276
1277        rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
1278        if (req_atim)
1279                rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
1280
1281        return 0;
1282}
1283
1284void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
1285{
1286        kfree(rt2x00dev->rx);
1287        rt2x00dev->rx = NULL;
1288        rt2x00dev->tx = NULL;
1289        rt2x00dev->bcn = NULL;
1290}
1291