linux/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*      Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
 *      Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
 *      Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
 *      Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
 *      Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
 *      Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
 *      Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
 *      Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
 *      <http://rt2x00.serialmonkey.com>
 */

/*      Module: rt2800mmio
 *      Abstract: rt2800 MMIO device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2800.h"
#include "rt2800lib.h"
#include "rt2800mmio.h"

unsigned int rt2800mmio_get_dma_done(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry *entry;
        int idx, qid;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                qid = queue->qid;
                idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(qid));
                break;
        case QID_MGMT:
                idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(5));
                break;
        case QID_RX:
                entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
                idx = entry->entry_idx;
                break;
        default:
                WARN_ON_ONCE(1);
                idx = 0;
                break;
        }

        return idx;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_dma_done);

/*
 * TX descriptor initialization
 */
__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
{
        return (__le32 *) entry->skb->data;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);

void rt2800mmio_write_tx_desc(struct queue_entry *entry,
                              struct txentry_desc *txdesc)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        __le32 *txd = entry_priv->desc;
        u32 word;
        const unsigned int txwi_size = entry->queue->winfo_size;
        /*
         * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
         * must together contain a TXWI structure + 802.11 header + padding +
         * 802.11 data. We choose to have SD_PTR0/SD_LEN0 contain only the
         * TXWI and SD_PTR1/SD_LEN1 contain the 802.11 header + padding +
         * 802.11 data. This means that LAST_SEC0 is always 0.
         */
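        /*
         * Illustrative sketch of that split (values as programmed below;
         * the descriptor field definitions live in rt2800.h):
         *
         *   skb_dma                  skb_dma + txwi_size
         *   |                        |
         *   v                        v
         *   +------------------------+-----------------------------------+
         *   | TXWI                   | 802.11 header + padding + data    |
         *   | SD_PTR0, SD_LEN0 =     | SD_PTR1, SD_LEN1 =                |
         *   |   txwi_size            |   entry->skb->len                 |
         *   +------------------------+-----------------------------------+
         */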

        /*
         * Initialize TX descriptor
         */
        word = 0;
        rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
        rt2x00_desc_write(txd, 0, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
        rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
                           !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W1_BURST,
                           test_bit(ENTRY_TXD_BURST, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
        rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
        rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
        rt2x00_desc_write(txd, 1, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
                           skbdesc->skb_dma + txwi_size);
        rt2x00_desc_write(txd, 2, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W3_WIV,
                           !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
        rt2x00_desc_write(txd, 3, word);

        /*
         * Register descriptor details in skb frame descriptor.
         */
        skbdesc->desc = txd;
        skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);

/*
 * RX control handlers
 */
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
                            struct rxdone_entry_desc *rxdesc)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        __le32 *rxd = entry_priv->desc;
        u32 word;

        word = rt2x00_desc_read(rxd, 3);

        if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
                rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

        /*
         * Unfortunately we don't know the cipher type used during
         * decryption. This prevents us from providing correct
         * statistics through debugfs.
         */
        rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

        if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
                /*
                 * Hardware has stripped IV/EIV data from 802.11 frame during
                 * decryption. Unfortunately the descriptor doesn't contain
                 * any fields with the EIV/IV data either, so they can't
                 * be restored by rt2x00lib.
                 */
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;

                /*
                 * The hardware has already checked the Michael MIC and has
                 * stripped it from the frame. Signal this to mac80211.
                 */
                rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

                if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) {
                        rxdesc->flags |= RX_FLAG_DECRYPTED;
                } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) {
                        /*
                         * In order to check the Michael MIC, the packet must
                         * have been decrypted. mac80211 doesn't check the MMIC
                         * failure flag to initiate MMIC countermeasures if the
                         * decrypted flag has not been set.
                         */
                        rxdesc->flags |= RX_FLAG_DECRYPTED;

                        rxdesc->flags |= RX_FLAG_MMIC_ERROR;
                }
        }

        if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
                rxdesc->dev_flags |= RXDONE_MY_BSS;

        if (rt2x00_get_field32(word, RXD_W3_L2PAD))
                rxdesc->dev_flags |= RXDONE_L2PAD;

        /*
         * Process the RXWI structure that is at the start of the buffer.
         */
        rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);

/*
 * Interrupt functions.
 */
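
/*
 * Wake the device by pushing a dummy configuration with the PS flag
 * cleared through rt2800_config(); used by the AUTO_WAKEUP handling
 * further below.
 */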
static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
{
        struct ieee80211_conf conf = { .flags = 0 };
        struct rt2x00lib_conf libconf = { .conf = &conf };

        rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}

static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
                                               struct rt2x00_field32 irq_field)
{
        u32 reg;

        /*
         * Enable a single interrupt. The interrupt mask register
         * access needs locking.
         */
        spin_lock_irq(&rt2x00dev->irqmask_lock);
        reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
        rt2x00_set_field32(&reg, irq_field, 1);
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
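
/*
 * Note: the helper above runs from tasklet (softirq) context where hard
 * interrupts are enabled, hence spin_lock_irq(); the interrupt handler
 * itself takes irqmask_lock with a plain spin_lock() since it already
 * runs in hard-irq context with local interrupts disabled.
 */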

void rt2800mmio_pretbtt_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
                                                    pretbtt_tasklet);
        rt2x00lib_pretbtt(rt2x00dev);
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);

void rt2800mmio_tbtt_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
        struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
        u32 reg;

        rt2x00lib_beacondone(rt2x00dev);

        if (rt2x00dev->intf_ap_count) {
                /*
                 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
                 * causing beacon skew and as a result causing problems with
                 * some powersaving clients over time. Shorten the beacon
                 * interval every 64 beacons by 64us to mitigate this effect.
                 */
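                /*
                 * The register field below is programmed in units of 1/16 TU
                 * (64 us, per the BCN_TIME_CFG definition in rt2800.h), so
                 * lowering the value by one for a single beacon period and
                 * restoring it on the next trims roughly 64 us off every
                 * BCN_TBTT_OFFSET-th beacon.
                 */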
                if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
                        reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
                                           (rt2x00dev->beacon_int * 16) - 1);
                        rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
                } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
                        reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
                                           (rt2x00dev->beacon_int * 16));
                        rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
                }
                drv_data->tbtt_tick++;
                drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
        }

        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);

void rt2800mmio_rxdone_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
                                                    rxdone_tasklet);
        if (rt2x00mmio_rxdone(rt2x00dev))
                tasklet_schedule(&rt2x00dev->rxdone_tasklet);
        else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);

void rt2800mmio_autowake_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
                                                    autowake_tasklet);
        rt2800mmio_wakeup(rt2x00dev);
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev,
                                            INT_MASK_CSR_AUTO_WAKEUP);
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);

static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
{
        u32 status;
        unsigned long flags;

        /*
         * The TX_FIFO_STATUS interrupt needs special care. We should
         * read TX_STA_FIFO but we should do it immediately as otherwise
         * the register can overflow and we would lose status reports.
         *
         * Hence, read the TX_STA_FIFO register and copy all tx status
         * reports into a kernel FIFO which is handled in the txstatus
         * tasklet. We use a tasklet to process the tx status reports
         * because we can schedule the tasklet multiple times (when the
         * interrupt fires again during tx status processing).
         *
         * Statuses are also read from the tx status timeout timer, so
         * take the lock to prevent concurrent writes to the FIFO.
         */

        spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);

        while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
                status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
                if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
                        break;

                kfifo_put(&rt2x00dev->txstatus_fifo, status);
        }

        spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}
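
/*
 * The loop above is the producer side of txstatus_fifo. A minimal sketch
 * of the matching consumer, assuming the per-report handling is done
 * elsewhere (in this driver it lives in rt2800_txdone() in rt2800lib):
 *
 *      u32 status;
 *
 *      while (kfifo_get(&rt2x00dev->txstatus_fifo, &status))
 *              handle_one_report(status);      (hypothetical helper)
 */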

void rt2800mmio_txstatus_tasklet(struct tasklet_struct *t)
{
        struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
                                                    txstatus_tasklet);

        rt2800_txdone(rt2x00dev, 16);

        if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);

irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
        struct rt2x00_dev *rt2x00dev = dev_instance;
        u32 reg, mask;

        /* Read status and ACK all interrupts */
        reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
        rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

        if (!reg)
                return IRQ_NONE;

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;

        /*
         * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
         * for interrupts and interrupt masks we can just use the value of
         * INT_SOURCE_CSR to create the interrupt mask.
         */
        mask = ~reg;
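        /*
         * Illustration: if, say, only RX_DONE was asserted, reg has just
         * that bit set and mask = ~reg clears exactly that bit, so the
         * masked write at the end of this handler disables only the
         * interrupts whose tasklets were scheduled. TX_FIFO_STATUS is the
         * exception: its mask bit is forced back to 1 below so status
         * reporting keeps running while the tasklet drains the FIFO.
         */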

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
                rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
                rt2800mmio_fetch_txstatus(rt2x00dev);
                if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                        tasklet_schedule(&rt2x00dev->txstatus_tasklet);
        }

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
                tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
                tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
                tasklet_schedule(&rt2x00dev->rxdone_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
                tasklet_schedule(&rt2x00dev->autowake_tasklet);

        /*
         * Disable all interrupts for which a tasklet was scheduled right now;
         * the tasklet will re-enable the appropriate interrupts.
         */
        spin_lock(&rt2x00dev->irqmask_lock);
        reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
        reg &= mask;
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock(&rt2x00dev->irqmask_lock);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);

void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
                           enum dev_state state)
{
        u32 reg;
        unsigned long flags;

        /*
         * When interrupts are being enabled, the interrupt status
         * register should be cleared first to assure a clean state.
         */
        if (state == STATE_RADIO_IRQ_ON) {
                reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
                rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
        }

        spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
        reg = 0;
        if (state == STATE_RADIO_IRQ_ON) {
                rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
        }
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

        if (state == STATE_RADIO_IRQ_OFF) {
                /*
                 * Wait for possibly running tasklets to finish.
                 */
                tasklet_kill(&rt2x00dev->txstatus_tasklet);
                tasklet_kill(&rt2x00dev->rxdone_tasklet);
                tasklet_kill(&rt2x00dev->autowake_tasklet);
                tasklet_kill(&rt2x00dev->tbtt_tasklet);
                tasklet_kill(&rt2x00dev->pretbtt_tasklet);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);

/*
 * Queue handlers.
 */
void rt2800mmio_start_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        u32 reg;

        switch (queue->qid) {
        case QID_RX:
                reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
                rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
                rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
                reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
                rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

                reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
                rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
                rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);

/* 200 ms, in nanoseconds: hrtimer_start() below takes a ktime_t */
#define TXSTATUS_TIMEOUT 200000000

void rt2800mmio_kick_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry *entry;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                WARN_ON_ONCE(rt2x00queue_empty(queue));
                entry = rt2x00queue_get_entry(queue, Q_INDEX);
                rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
                                          entry->entry_idx);
                hrtimer_start(&rt2x00dev->txstatus_timer,
                              TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
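                /*
                 * This arms the tx status watchdog: if no TX_FIFO_STATUS
                 * interrupt shows up within TXSTATUS_TIMEOUT,
                 * rt2800mmio_tx_sta_fifo_timeout() (installed in
                 * rt2800mmio_probe_hw()) polls the FIFO instead.
                 */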
                break;
        case QID_MGMT:
                entry = rt2x00queue_get_entry(queue, Q_INDEX);
                rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
                                          entry->entry_idx);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);

void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        bool tx_queue = false;
        unsigned int i;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                tx_queue = true;
                break;
        case QID_RX:
                break;
        default:
                return;
        }

        for (i = 0; i < 5; i++) {
                /*
                 * Check if the driver is already done, otherwise we
                 * have to sleep a little while to give the driver/hw
                 * the opportunity to complete the interrupt processing itself.
                 */
                if (rt2x00queue_empty(queue))
                        break;

                /*
                 * For TX queues, schedule the txdone work to catch
                 * tx status timeouts; otherwise just wait.
                 */
                if (tx_queue)
                        queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);

                /*
                 * Wait for a little while to give the driver
                 * the opportunity to recover itself.
                 */
                msleep(50);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue);

void rt2800mmio_stop_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        u32 reg;

        switch (queue->qid) {
        case QID_RX:
                reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
                rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
                rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
                reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
                rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

                reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
                rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
                rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

                /*
                 * Wait for current invocation to finish. The tasklet
                 * won't be scheduled anymore afterwards since we disabled
                 * the TBTT and PRE TBTT timer.
                 */
                tasklet_kill(&rt2x00dev->tbtt_tasklet);
                tasklet_kill(&rt2x00dev->pretbtt_tasklet);

                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);

void rt2800mmio_queue_init(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        unsigned short txwi_size, rxwi_size;

        rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);

        switch (queue->qid) {
        case QID_RX:
                queue->limit = 128;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = RXD_DESC_SIZE;
                queue->winfo_size = rxwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                queue->limit = 64;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = TXD_DESC_SIZE;
                queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_BEACON:
                queue->limit = 8;
                queue->data_size = 0; /* No DMA required for beacons */
                queue->desc_size = TXD_DESC_SIZE;
                queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_ATIM:
        default:
                BUG();
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);

/*
 * Initialization functions.
 */
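
/*
 * Returns true while the descriptor for this entry is still owned by the
 * DMA engine, i.e. the hardware has not yet set the DMA_DONE bit in
 * descriptor word 1.
 */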
bool rt2800mmio_get_entry_state(struct queue_entry *entry)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        u32 word;

        if (entry->queue->qid == QID_RX) {
                word = rt2x00_desc_read(entry_priv->desc, 1);

                return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
        } else {
                word = rt2x00_desc_read(entry_priv->desc, 1);

                return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);

void rt2800mmio_clear_entry(struct queue_entry *entry)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        u32 word;

        if (entry->queue->qid == QID_RX) {
                word = rt2x00_desc_read(entry_priv->desc, 0);
                rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
                rt2x00_desc_write(entry_priv->desc, 0, word);

                word = rt2x00_desc_read(entry_priv->desc, 1);
                rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
                rt2x00_desc_write(entry_priv->desc, 1, word);

                /*
                 * Set RX IDX in register to inform hardware that we have
                 * handled this entry and it is available for reuse again.
                 */
                rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
                                          entry->entry_idx);
        } else {
                word = rt2x00_desc_read(entry_priv->desc, 1);
                rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
                rt2x00_desc_write(entry_priv->desc, 1, word);

                /* If last entry stop txstatus timer */
                if (entry->queue->length == 1)
                        hrtimer_cancel(&rt2x00dev->txstatus_timer);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);

int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
{
        struct queue_entry_priv_mmio *entry_priv;

        /*
         * Initialize registers.
         */
        entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
                                  rt2x00dev->tx[0].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);

        entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
                                  rt2x00dev->tx[1].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);

        entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
                                  rt2x00dev->tx[2].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);

        entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
                                  rt2x00dev->tx[3].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);

        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);

        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);

        entry_priv = rt2x00dev->rx->entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
                                  rt2x00dev->rx[0].limit);
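        /*
         * Presumably, pointing the CPU RX index at the last descriptor
         * hands all RX descriptors to the hardware up front;
         * rt2800mmio_clear_entry() then advances RX_CRX_IDX as entries
         * are processed.
         */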
        rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
                                  rt2x00dev->rx[0].limit - 1);
        rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);

        rt2800_disable_wpdma(rt2x00dev);

        rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);

int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
        u32 reg;

        /*
         * Reset DMA indexes
         */
        reg = rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
        rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

        rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
        rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

        if (rt2x00_is_pcie(rt2x00dev) &&
            (rt2x00_rt(rt2x00dev, RT3090) ||
             rt2x00_rt(rt2x00dev, RT3390) ||
             rt2x00_rt(rt2x00dev, RT3572) ||
             rt2x00_rt(rt2x00dev, RT3593) ||
             rt2x00_rt(rt2x00dev, RT5390) ||
             rt2x00_rt(rt2x00dev, RT5392) ||
             rt2x00_rt(rt2x00dev, RT5592))) {
                reg = rt2x00mmio_register_read(rt2x00dev, AUX_CTRL);
                rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
                rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
                rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
        }

        rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

        reg = 0;
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
        rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

        rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);

/*
 * Device state switch handlers.
 */
int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
{
        /* Wait for DMA, ignore error until we initialize queues. */
        rt2800_wait_wpdma_ready(rt2x00dev);

        if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
                return -EIO;

        return rt2800_enable_radio(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);

static void rt2800mmio_work_txdone(struct work_struct *work)
{
        struct rt2x00_dev *rt2x00dev =
            container_of(work, struct rt2x00_dev, txdone_work);

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return;

        while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
               rt2800_txstatus_timeout(rt2x00dev)) {
                tasklet_disable(&rt2x00dev->txstatus_tasklet);
                rt2800_txdone(rt2x00dev, UINT_MAX);
                rt2800_txdone_nostatus(rt2x00dev);
                tasklet_enable(&rt2x00dev->txstatus_tasklet);
        }

        if (rt2800_txstatus_pending(rt2x00dev))
                hrtimer_start(&rt2x00dev->txstatus_timer,
                              TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
}

static enum hrtimer_restart rt2800mmio_tx_sta_fifo_timeout(struct hrtimer *timer)
{
        struct rt2x00_dev *rt2x00dev =
            container_of(timer, struct rt2x00_dev, txstatus_timer);

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                goto out;

        if (!rt2800_txstatus_pending(rt2x00dev))
                goto out;

        rt2800mmio_fetch_txstatus(rt2x00dev);
        if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                tasklet_schedule(&rt2x00dev->txstatus_tasklet);
        else
                queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
out:
        return HRTIMER_NORESTART;
}

int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
{
        int retval;

        retval = rt2800_probe_hw(rt2x00dev);
        if (retval)
                return retval;

        /*
         * Set txstatus timer function.
         */
        rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;

        /*
         * Overwrite TX done handler
         */
        INIT_WORK(&rt2x00dev->txdone_work, rt2800mmio_work_txdone);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_probe_hw);

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2800 MMIO library");
MODULE_LICENSE("GPL");