linux/drivers/net/wireless/rt2x00/rt2800mmio.c
/*      Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
 *      Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
 *      Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
 *      Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
 *      Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
 *      Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
 *      Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
 *      Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
 *      <http://rt2x00.serialmonkey.com>
 *
 *      This program is free software; you can redistribute it and/or modify
 *      it under the terms of the GNU General Public License as published by
 *      the Free Software Foundation; either version 2 of the License, or
 *      (at your option) any later version.
 *
 *      This program is distributed in the hope that it will be useful,
 *      but WITHOUT ANY WARRANTY; without even the implied warranty of
 *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *      GNU General Public License for more details.
 *
 *      You should have received a copy of the GNU General Public License
 *      along with this program; if not, write to the
 *      Free Software Foundation, Inc.,
 *      59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*      Module: rt2800mmio
 *      Abstract: rt2800 MMIO device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2800.h"
#include "rt2800lib.h"
#include "rt2800mmio.h"

/*
 * TX descriptor initialization
 */
__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
{
        return (__le32 *) entry->skb->data;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);

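/*
 * Fill the four-word TX descriptor (TXD) that the on-chip DMA engine
 * reads from host memory. The TXWI itself sits at the start of the
 * frame buffer (see rt2800mmio_get_txwi above) and is written
 * separately by the rt2800 library code; here we only describe where
 * the TXWI and the frame data are located.
 */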
void rt2800mmio_write_tx_desc(struct queue_entry *entry,
                              struct txentry_desc *txdesc)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        __le32 *txd = entry_priv->desc;
        u32 word;
        const unsigned int txwi_size = entry->queue->winfo_size;

        /*
         * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
         * must contain a TXWI structure + 802.11 header + padding + 802.11
         * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI and
         * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
         * data. This means that LAST_SEC0 is always 0.
         */

        /*
         * Initialize TX descriptor
         */
        word = 0;
        rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
        rt2x00_desc_write(txd, 0, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
        rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
                           !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W1_BURST,
                           test_bit(ENTRY_TXD_BURST, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
        rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
        rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
        rt2x00_desc_write(txd, 1, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
                           skbdesc->skb_dma + txwi_size);
        rt2x00_desc_write(txd, 2, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W3_WIV,
                           !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
        rt2x00_desc_write(txd, 3, word);

        /*
         * Register descriptor details in skb frame descriptor.
         */
        skbdesc->desc = txd;
        skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);

/*
 * RX control handlers
 */
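
/*
 * Translate the flags in RX descriptor word 3 into rxdone_entry_desc
 * flags and hand the RXWI at the start of the buffer over to
 * rt2800_process_rxwi() for further processing.
 */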
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
                            struct rxdone_entry_desc *rxdesc)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        __le32 *rxd = entry_priv->desc;
        u32 word;

        rt2x00_desc_read(rxd, 3, &word);

        if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
                rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

        /*
         * Unfortunately we don't know the cipher type used during
         * decryption. This prevents us from providing correct
         * statistics through debugfs.
         */
        rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

        if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
                /*
                 * Hardware has stripped the IV/EIV data from the 802.11
                 * frame during decryption. Unfortunately the descriptor
                 * doesn't contain any fields with the EIV/IV data either,
                 * so they can't be restored by rt2x00lib.
                 */
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;

                /*
                 * The hardware has already checked the Michael MIC and has
                 * stripped it from the frame. Signal this to mac80211.
                 */
                rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

                if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
                        rxdesc->flags |= RX_FLAG_DECRYPTED;
                else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
                        rxdesc->flags |= RX_FLAG_MMIC_ERROR;
        }

        if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
                rxdesc->dev_flags |= RXDONE_MY_BSS;

        if (rt2x00_get_field32(word, RXD_W3_L2PAD))
                rxdesc->dev_flags |= RXDONE_L2PAD;

        /*
         * Process the RXWI structure that is at the start of the buffer.
         */
        rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);

/*
 * Interrupt functions.
 */
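
/*
 * Disable powersaving by re-running the PS configuration with an
 * empty conf; called from the autowake tasklet below to bring the
 * device back out of sleep.
 */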
static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
{
        struct ieee80211_conf conf = { .flags = 0 };
        struct rt2x00lib_conf libconf = { .conf = &conf };

        rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}

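/*
 * Check whether the WCID in a TX_STA_FIFO status word matches the
 * WCID stored in the TXWI of the given queue entry.
 */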
static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
{
        __le32 *txwi;
        u32 word;
        int wcid, tx_wcid;

        wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);

        txwi = rt2800_drv_get_txwi(entry);
        rt2x00_desc_read(txwi, 1, &word);
        tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);

        return (tx_wcid == wcid);
}

static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
{
        u32 status = *(u32 *)data;

        /*
         * rt2800pci hardware might reorder frames when exchanging traffic
         * with multiple BA enabled STAs.
         *
         * For example, a tx queue
         *    [ STA1 | STA2 | STA1 | STA2 ]
         * can result in tx status reports
         *    [ STA1 | STA1 | STA2 | STA2 ]
         * when the hw decides to aggregate the frames for STA1 into one AMPDU.
         *
         * To mitigate this effect, associate the tx status to the first frame
         * in the tx queue with a matching wcid.
         */
        if (rt2800mmio_txdone_entry_check(entry, status) &&
            !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
                /*
                 * Got a matching frame, associate the tx status with
                 * the frame.
                 */
                entry->status = status;
                set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
                return true;
        }

        /* Check the next frame */
        return false;
}

static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
{
        u32 status = *(u32 *)data;

        /*
         * Find the first frame without a tx status and assign this status
         * to it regardless of whether it actually matches.
         */
        if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
                /*
                 * Got a frame without a tx status yet, associate this
                 * tx status with it.
                 */
                entry->status = status;
                set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
                return true;
        }

        /* Check the next frame */
        return false;
}

static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
                                              void *data)
{
        if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
                rt2800_txdone_entry(entry, entry->status,
                                    rt2800mmio_get_txwi(entry));
                return false;
        }

        /* No more frames to release */
        return true;
}

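/*
 * Drain up to 16 tx status words from the kernel FIFO filled by the
 * interrupt handler, match each one to a frame in the corresponding
 * tx queue and release all frames that received a status. Returns
 * true if the budget was exhausted and more work may be pending.
 */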
static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        u32 status;
        u8 qid;
        int max_tx_done = 16;

        while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
                qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
                if (unlikely(qid >= QID_RX)) {
                        /*
                         * Unknown queue, this shouldn't happen. Just drop
                         * this tx status.
                         */
                        rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
                                    qid);
                        break;
                }

                queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
                if (unlikely(queue == NULL)) {
                        /*
                         * The queue is NULL, this shouldn't happen. Stop
                         * processing here and drop the tx status.
                         */
                        rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
                                    qid);
                        break;
                }

                if (unlikely(rt2x00queue_empty(queue))) {
                        /*
                         * The queue is empty. Stop processing here
                         * and drop the tx status.
                         */
                        rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
                                    qid);
                        break;
                }

                /*
                 * Let's associate this tx status with the first
                 * matching frame.
                 */
                if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
                                                Q_INDEX, &status,
                                                rt2800mmio_txdone_find_entry)) {
                        /*
                         * We cannot match the tx status to any frame, so just
                         * use the first one.
                         */
                        if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
                                                        Q_INDEX, &status,
                                                        rt2800mmio_txdone_match_first)) {
                                rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
                                            qid);
                                break;
                        }
                }

                /*
                 * Release all frames with a valid tx status.
                 */
                rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
                                           Q_INDEX, NULL,
                                           rt2800mmio_txdone_release_entries);

                if (--max_tx_done == 0)
                        break;
        }

        return !max_tx_done;
}

static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
                                               struct rt2x00_field32 irq_field)
{
        u32 reg;

        /*
         * Enable a single interrupt. The interrupt mask register
         * access needs locking.
         */
        spin_lock_irq(&rt2x00dev->irqmask_lock);
        rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
        rt2x00_set_field32(&reg, irq_field, 1);
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irq(&rt2x00dev->irqmask_lock);
}

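/*
 * Bottom half for tx status processing: run rt2800mmio_txdone() and
 * reschedule ourselves if it did not manage to drain the status FIFO.
 */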
void rt2800mmio_txstatus_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
        if (rt2800mmio_txdone(rt2x00dev))
                tasklet_schedule(&rt2x00dev->txstatus_tasklet);

        /*
         * No need to enable the tx status interrupt here as we always
         * leave it enabled to minimize the possibility of a tx status
         * register overflow. See comment in interrupt handler.
         */
}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);

void rt2800mmio_pretbtt_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
        rt2x00lib_pretbtt(rt2x00dev);
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);

void rt2800mmio_tbtt_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
        struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
        u32 reg;

        rt2x00lib_beacondone(rt2x00dev);

        if (rt2x00dev->intf_ap_count) {
                /*
                 * The rt2800pci hardware tbtt timer is off by 1us per tbtt,
                 * causing beacon skew which over time causes problems with
                 * some powersaving clients. Shorten the beacon interval by
                 * 64us every 64 beacons to mitigate this effect.
                 */
                if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
                        rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
                        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
                                           (rt2x00dev->beacon_int * 16) - 1);
                        rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
                } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
                        rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
                        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
                                           (rt2x00dev->beacon_int * 16));
                        rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
                }
                drv_data->tbtt_tick++;
                drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
        }

        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);

void rt2800mmio_rxdone_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
        if (rt2x00mmio_rxdone(rt2x00dev))
                tasklet_schedule(&rt2x00dev->rxdone_tasklet);
        else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);

void rt2800mmio_autowake_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
        rt2800mmio_wakeup(rt2x00dev);
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev,
                                            INT_MASK_CSR_AUTO_WAKEUP);
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);

static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
{
        u32 status;
        int i;

        /*
         * The TX_FIFO_STATUS interrupt needs special care. We have to
         * read TX_STA_FIFO, and we have to do it immediately, otherwise
         * the register can overflow and we would lose status reports.
         *
         * Hence, read the TX_STA_FIFO register and copy all tx status
         * reports into a kernel FIFO which is handled in the txstatus
         * tasklet. We use a tasklet to process the tx status reports
         * because the tasklet can be scheduled multiple times (when the
         * interrupt fires again during tx status processing).
         *
         * Furthermore we don't disable the TX_FIFO_STATUS interrupt
         * here but leave it enabled so that the TX_STA_FIFO can also
         * be read while the tx status tasklet gets executed.
         *
         * Since we have only one producer and one consumer we don't
         * need to lock the kfifo.
         */
        for (i = 0; i < rt2x00dev->tx->limit; i++) {
                rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);

                if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
                        break;

                if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) {
                        rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
                        break;
                }
        }

        /* Schedule the tasklet for processing the tx status. */
        tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}

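/*
 * Top half: acknowledge all asserted interrupt sources, queue the tx
 * status reports, schedule the matching tasklets and mask the
 * interrupts that are now being handled in tasklet context.
 */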
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
        struct rt2x00_dev *rt2x00dev = dev_instance;
        u32 reg, mask;

        /* Read status and ACK all interrupts */
        rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
        rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

        if (!reg)
                return IRQ_NONE;

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;

        /*
         * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
         * for interrupts and interrupt masks we can just use the value of
         * INT_SOURCE_CSR to create the interrupt mask.
         */
        mask = ~reg;

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
                rt2800mmio_txstatus_interrupt(rt2x00dev);
                /*
                 * Never disable the TX_FIFO_STATUS interrupt.
                 */
                rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
        }

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
                tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
                tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
                tasklet_schedule(&rt2x00dev->rxdone_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
                tasklet_schedule(&rt2x00dev->autowake_tasklet);

        /*
         * Disable all interrupts for which a tasklet was scheduled right
         * now; the tasklets will re-enable the appropriate interrupts.
         */
        spin_lock(&rt2x00dev->irqmask_lock);
        rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
        reg &= mask;
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock(&rt2x00dev->irqmask_lock);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);

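/*
 * Enable or disable the complete set of interrupts used by the
 * driver; when disabling, also wait for any running tasklets to
 * finish.
 */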
void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
                           enum dev_state state)
{
        u32 reg;
        unsigned long flags;

        /*
         * When interrupts are being enabled, clear the interrupt
         * source register first to ensure a clean state.
         */
        if (state == STATE_RADIO_IRQ_ON) {
                rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
                rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
        }

        spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
        reg = 0;
        if (state == STATE_RADIO_IRQ_ON) {
                rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
        }
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

        if (state == STATE_RADIO_IRQ_OFF) {
                /*
                 * Wait for possibly running tasklets to finish.
                 */
                tasklet_kill(&rt2x00dev->txstatus_tasklet);
                tasklet_kill(&rt2x00dev->rxdone_tasklet);
                tasklet_kill(&rt2x00dev->autowake_tasklet);
                tasklet_kill(&rt2x00dev->tbtt_tasklet);
                tasklet_kill(&rt2x00dev->pretbtt_tasklet);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);

/*
 * Queue handlers.
 */
void rt2800mmio_start_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        u32 reg;

        switch (queue->qid) {
        case QID_RX:
                rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
                rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
                rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
                rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
                rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

                rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
                rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
                rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);

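/*
 * Tell the hardware that new tx descriptors are ready by writing the
 * current queue index to the per-queue TX_CTX_IDX register.
 */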
void rt2800mmio_kick_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry *entry;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                entry = rt2x00queue_get_entry(queue, Q_INDEX);
                rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
                                          entry->entry_idx);
                break;
        case QID_MGMT:
                entry = rt2x00queue_get_entry(queue, Q_INDEX);
                rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
                                          entry->entry_idx);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);

void rt2800mmio_stop_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        u32 reg;

        switch (queue->qid) {
        case QID_RX:
                rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
                rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
                rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
                rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
                rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

                rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
                rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
                rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

                /*
                 * Wait for current invocation to finish. The tasklet
                 * won't be scheduled anymore afterwards since we disabled
                 * the TBTT and PRE TBTT timer.
                 */
                tasklet_kill(&rt2x00dev->tbtt_tasklet);
                tasklet_kill(&rt2x00dev->pretbtt_tasklet);

                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);

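/*
 * Set the per-queue entry limits, descriptor sizes and TXWI/RXWI
 * sizes used by the MMIO based devices.
 */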
void rt2800mmio_queue_init(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        unsigned short txwi_size, rxwi_size;

        rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);

        switch (queue->qid) {
        case QID_RX:
                queue->limit = 128;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = RXD_DESC_SIZE;
                queue->winfo_size = rxwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                queue->limit = 64;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = TXD_DESC_SIZE;
                queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_BEACON:
                queue->limit = 8;
                queue->data_size = 0; /* No DMA required for beacons */
                queue->desc_size = TXD_DESC_SIZE;
                queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_ATIM:
                /* fallthrough */
        default:
                BUG();
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);

/*
 * Initialization functions.
 */
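
/*
 * Return true while the DMA_DONE bit of the entry's descriptor is not
 * yet set, i.e. the hardware has not finished with the entry.
 */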
bool rt2800mmio_get_entry_state(struct queue_entry *entry)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        u32 word;

        if (entry->queue->qid == QID_RX) {
                rt2x00_desc_read(entry_priv->desc, 1, &word);

                return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
        } else {
                rt2x00_desc_read(entry_priv->desc, 1, &word);

                return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);

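/*
 * Re-arm an entry for reuse: for RX, point the descriptor at the skb,
 * clear DMA_DONE and hand the entry back to the hardware via
 * RX_CRX_IDX; for TX, simply mark the descriptor as done.
 */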
void rt2800mmio_clear_entry(struct queue_entry *entry)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        u32 word;

        if (entry->queue->qid == QID_RX) {
                rt2x00_desc_read(entry_priv->desc, 0, &word);
                rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
                rt2x00_desc_write(entry_priv->desc, 0, word);

                rt2x00_desc_read(entry_priv->desc, 1, &word);
                rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
                rt2x00_desc_write(entry_priv->desc, 1, word);

                /*
                 * Set RX IDX in register to inform hardware that we have
                 * handled this entry and it is available for reuse again.
                 */
                rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
                                          entry->entry_idx);
        } else {
                rt2x00_desc_read(entry_priv->desc, 1, &word);
                rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
                rt2x00_desc_write(entry_priv->desc, 1, word);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);

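/*
 * Program the descriptor ring base addresses, sizes and initial
 * indices into the hardware: the four data tx rings, rings 4 and 5
 * (left empty here) and the rx ring.
 */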
int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
{
        struct queue_entry_priv_mmio *entry_priv;

        /*
         * Initialize registers.
         */
        entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
                                  rt2x00dev->tx[0].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);

        entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
                                  rt2x00dev->tx[1].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);

        entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
                                  rt2x00dev->tx[2].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);

        entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
                                  rt2x00dev->tx[3].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);

        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);

        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);

        entry_priv = rt2x00dev->rx->entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
                                  rt2x00dev->rx[0].limit);
        rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
                                  rt2x00dev->rx[0].limit - 1);
        rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);

        rt2800_disable_wpdma(rt2x00dev);

        rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);

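/*
 * Bring the device into a known state: reset all DMA indices, toggle
 * PBF_SYS_CTRL, force the PCIe clock on the chips that need it and
 * reset the MAC and BBP.
 */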
int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
        u32 reg;

        /*
         * Reset DMA indexes
         */
        rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
        rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

        rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
        rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

        if (rt2x00_is_pcie(rt2x00dev) &&
            (rt2x00_rt(rt2x00dev, RT3090) ||
             rt2x00_rt(rt2x00dev, RT3390) ||
             rt2x00_rt(rt2x00dev, RT3572) ||
             rt2x00_rt(rt2x00dev, RT3593) ||
             rt2x00_rt(rt2x00dev, RT5390) ||
             rt2x00_rt(rt2x00dev, RT5392) ||
             rt2x00_rt(rt2x00dev, RT5592))) {
                rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
                rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
                rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
                rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
        }

        rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

        reg = 0;
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
        rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

        rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);

/*
 * Device state switch handlers.
 */
int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
{
        /* Wait for DMA, ignore error until we initialize queues. */
        rt2800_wait_wpdma_ready(rt2x00dev);

        if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
                return -EIO;

        return rt2800_enable_radio(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2800 MMIO library");
MODULE_LICENSE("GPL");