linux/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*      Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
 *      Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
 *      Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
 *      Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
 *      Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
 *      Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
 *      Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
 *      Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
 *      <http://rt2x00.serialmonkey.com>
 */

/*      Module: rt2800mmio
 *      Abstract: rt2800 MMIO device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2800.h"
#include "rt2800lib.h"
#include "rt2800mmio.h"

unsigned int rt2800mmio_get_dma_done(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry *entry;
        int idx, qid;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                qid = queue->qid;
                idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(qid));
                break;
        case QID_MGMT:
                idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(5));
                break;
        case QID_RX:
                entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
                idx = entry->entry_idx;
                break;
        default:
                WARN_ON_ONCE(1);
                idx = 0;
                break;
        }

        return idx;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_dma_done);

/*
 * TX descriptor initialization
 */
__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
{
        return (__le32 *) entry->skb->data;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);

void rt2800mmio_write_tx_desc(struct queue_entry *entry,
                              struct txentry_desc *txdesc)
{
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        __le32 *txd = entry_priv->desc;
        u32 word;
        const unsigned int txwi_size = entry->queue->winfo_size;

        /*
         * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
         * must contain a TXWI structure + 802.11 header + padding + 802.11
         * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI and
         * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
         * data. This means that LAST_SEC0 is always 0.
         */

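        /*
         * Illustration of the resulting split (sizes are chip dependent,
         * txwi_size comes from the queue's winfo_size):
         *
         *   skb_dma                       skb_dma + txwi_size
         *   v                             v
         *   +----- SD_PTR0 (TXWI) -------+--- SD_PTR1 (hdr + pad + data) ---+
         */
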
        /*
         * Initialize TX descriptor
         */
        word = 0;
        rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
        rt2x00_desc_write(txd, 0, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
        rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
                           !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W1_BURST,
                           test_bit(ENTRY_TXD_BURST, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
        rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
        rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
        rt2x00_desc_write(txd, 1, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
                           skbdesc->skb_dma + txwi_size);
        rt2x00_desc_write(txd, 2, word);

        word = 0;
        rt2x00_set_field32(&word, TXD_W3_WIV,
                           !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
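        /* QSEL 2 routes the frame through the hardware EDCA FIFO
         * (Ralink's FIFO numbering). */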
        rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
        rt2x00_desc_write(txd, 3, word);

        /*
         * Register descriptor details in skb frame descriptor.
         */
        skbdesc->desc = txd;
        skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);

/*
 * RX control handlers
 */
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
                            struct rxdone_entry_desc *rxdesc)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        __le32 *rxd = entry_priv->desc;
        u32 word;

        word = rt2x00_desc_read(rxd, 3);

        if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
                rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

        /*
         * Unfortunately we don't know the cipher type used during
         * decryption. This prevents us from providing correct
         * statistics through debugfs.
         */
        rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

        if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
                /*
                 * Hardware has stripped IV/EIV data from 802.11 frame during
                 * decryption. Unfortunately the descriptor doesn't contain
                 * any fields with the EIV/IV data either, so they can't
                 * be restored by rt2x00lib.
                 */
                rxdesc->flags |= RX_FLAG_IV_STRIPPED;

                /*
                 * The hardware has already checked the Michael Mic and has
                 * stripped it from the frame. Signal this to mac80211.
                 */
                rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

                if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) {
                        rxdesc->flags |= RX_FLAG_DECRYPTED;
                } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) {
                        /*
                         * In order to check the Michael MIC, the packet must
                         * have been decrypted. mac80211 doesn't act on the
                         * MMIC failure flag to initiate MMIC countermeasures
                         * unless the decrypted flag has also been set.
                         */
                        rxdesc->flags |= RX_FLAG_DECRYPTED;

                        rxdesc->flags |= RX_FLAG_MMIC_ERROR;
                }
        }

        if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
                rxdesc->dev_flags |= RXDONE_MY_BSS;

        if (rt2x00_get_field32(word, RXD_W3_L2PAD))
                rxdesc->dev_flags |= RXDONE_L2PAD;

        /*
         * Process the RXWI structure that is at the start of the buffer.
         */
        rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);

/*
 * Interrupt functions.
 */
static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
{
        struct ieee80211_conf conf = { .flags = 0 };
        struct rt2x00lib_conf libconf = { .conf = &conf };

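        /* conf.flags has IEEE80211_CONF_PS cleared, so signalling a PS
         * change here forces the device out of powersave. */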
        rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}

static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
                                               struct rt2x00_field32 irq_field)
{
        u32 reg;

        /*
         * Enable a single interrupt. The interrupt mask register
         * access needs locking.
         */
        spin_lock_irq(&rt2x00dev->irqmask_lock);
        reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
        rt2x00_set_field32(&reg, irq_field, 1);
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irq(&rt2x00dev->irqmask_lock);
}

void rt2800mmio_pretbtt_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
        rt2x00lib_pretbtt(rt2x00dev);
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);

void rt2800mmio_tbtt_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
        struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
        u32 reg;

        rt2x00lib_beacondone(rt2x00dev);

        if (rt2x00dev->intf_ap_count) {
                /*
                 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
                 * causing beacon skew and as a result causing problems with
                 * some powersaving clients over time. Shorten the beacon
                 * interval every 64 beacons by 64us to mitigate this effect.
                 */
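                /*
                 * BCN_TIME_CFG_BEACON_INTERVAL is programmed in units of
                 * 1/16 TU (64us): writing beacon_int * 16 - 1 for one period
                 * and restoring beacon_int * 16 on the next shortens a single
                 * interval by 64us once per BCN_TBTT_OFFSET beacons.
                 */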
                if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
                        reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
                                           (rt2x00dev->beacon_int * 16) - 1);
                        rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
                } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
                        reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
                                           (rt2x00dev->beacon_int * 16));
                        rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
                }
                drv_data->tbtt_tick++;
                drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
        }

        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);

void rt2800mmio_rxdone_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
        if (rt2x00mmio_rxdone(rt2x00dev))
                tasklet_schedule(&rt2x00dev->rxdone_tasklet);
        else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);

void rt2800mmio_autowake_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
        rt2800mmio_wakeup(rt2x00dev);
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                rt2800mmio_enable_interrupt(rt2x00dev,
                                            INT_MASK_CSR_AUTO_WAKEUP);
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);

static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
{
        u32 status;
        unsigned long flags;

        /*
         * The TX_FIFO_STATUS interrupt needs special care. We should
         * read TX_STA_FIFO but we should do it immediately as otherwise
         * the register can overflow and we would lose status reports.
         *
         * Hence, read the TX_STA_FIFO register and copy all tx status
         * reports into a kernel FIFO which is handled in the txstatus
         * tasklet. We use a tasklet to process the tx status reports
         * because we can schedule the tasklet multiple times (when the
         * interrupt fires again during tx status processing).
         *
         * Statuses are also read from the tx status timeout timer, so
         * take the lock to prevent concurrent writes to the FIFO.
         */

        spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);

        while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
                status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
                if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
                        break;

                kfifo_put(&rt2x00dev->txstatus_fifo, status);
        }

        spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}

void rt2800mmio_txstatus_tasklet(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

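        /*
         * Process at most 16 tx status reports per run to keep the
         * tasklet short; if more are still queued the tasklet is
         * rescheduled below.
         */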
        rt2800_txdone(rt2x00dev, 16);

        if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                tasklet_schedule(&rt2x00dev->txstatus_tasklet);

}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);

irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
        struct rt2x00_dev *rt2x00dev = dev_instance;
        u32 reg, mask;

        /* Read status and ACK all interrupts */
        reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
        rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

        if (!reg)
                return IRQ_NONE;

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return IRQ_HANDLED;

        /*
         * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
         * for interrupts and interrupt masks we can just use the value of
         * INT_SOURCE_CSR to create the interrupt mask.
         */
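        /*
         * Example: if only the RX done bit is set in INT_SOURCE_CSR, then
         * ~reg has exactly that bit cleared, and ANDing it into
         * INT_MASK_CSR below disables only the RX done interrupt.
         */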
        mask = ~reg;

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
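                /*
                 * Keep TX_FIFO_STATUS enabled in the mask: the status words
                 * are drained into the kfifo right here in the handler, and
                 * the txstatus tasklet does not re-enable this interrupt
                 * itself.
                 */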
                rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
                rt2800mmio_fetch_txstatus(rt2x00dev);
                if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                        tasklet_schedule(&rt2x00dev->txstatus_tasklet);
        }

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
                tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
                tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
                tasklet_schedule(&rt2x00dev->rxdone_tasklet);

        if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
                tasklet_schedule(&rt2x00dev->autowake_tasklet);

        /*
         * Disable all interrupts for which a tasklet was scheduled right now;
         * the tasklet will re-enable the appropriate interrupts.
         */
        spin_lock(&rt2x00dev->irqmask_lock);
        reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
        reg &= mask;
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock(&rt2x00dev->irqmask_lock);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);

void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
                           enum dev_state state)
{
        u32 reg;
        unsigned long flags;

        /*
         * When interrupts are being enabled, first clear the interrupt
         * source register to ensure we start from a clean state.
         */
        if (state == STATE_RADIO_IRQ_ON) {
                reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
                rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
        }

        spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
        reg = 0;
        if (state == STATE_RADIO_IRQ_ON) {
                rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
                rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
        }
        rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

        if (state == STATE_RADIO_IRQ_OFF) {
                /*
                 * Wait for possibly running tasklets to finish.
                 */
                tasklet_kill(&rt2x00dev->txstatus_tasklet);
                tasklet_kill(&rt2x00dev->rxdone_tasklet);
                tasklet_kill(&rt2x00dev->autowake_tasklet);
                tasklet_kill(&rt2x00dev->tbtt_tasklet);
                tasklet_kill(&rt2x00dev->pretbtt_tasklet);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);

/*
 * Queue handlers.
 */
void rt2800mmio_start_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        u32 reg;

        switch (queue->qid) {
        case QID_RX:
                reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
                rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
                rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
                reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
                rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

                reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
                rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
                rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);

/* 200 ms */
#define TXSTATUS_TIMEOUT 200000000
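/* Passed to hrtimer_start() as a ktime_t, i.e. nanoseconds: 200,000,000 ns. */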

void rt2800mmio_kick_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry *entry;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                WARN_ON_ONCE(rt2x00queue_empty(queue));
                entry = rt2x00queue_get_entry(queue, Q_INDEX);
                rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
                                          entry->entry_idx);
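                /*
                 * Arm the tx status watchdog; if no TX_FIFO_STATUS interrupt
                 * arrives within TXSTATUS_TIMEOUT the hrtimer callback kicks
                 * status processing (see rt2800mmio_tx_sta_fifo_timeout()).
                 */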
                hrtimer_start(&rt2x00dev->txstatus_timer,
                              TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
                break;
        case QID_MGMT:
                entry = rt2x00queue_get_entry(queue, Q_INDEX);
                rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
                                          entry->entry_idx);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);

void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        bool tx_queue = false;
        unsigned int i;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                tx_queue = true;
                break;
        case QID_RX:
                break;
        default:
                return;
        }

        for (i = 0; i < 5; i++) {
                /*
                 * Check if the driver is already done, otherwise we
                 * have to sleep a little while to give the driver/hw
                 * the opportunity to complete the interrupt processing
                 * itself.
                 */
                if (rt2x00queue_empty(queue))
                        break;

                /*
                 * For TX queues schedule the completion tasklet to catch
                 * tx status timeouts, otherwise just wait.
                 */
                if (tx_queue)
                        queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);

                /*
                 * Wait for a little while to give the driver
                 * the opportunity to recover itself.
                 */
                msleep(50);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue);

void rt2800mmio_stop_queue(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        u32 reg;

        switch (queue->qid) {
        case QID_RX:
                reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
                rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
                rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
                break;
        case QID_BEACON:
                reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
                rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

                reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
                rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
                rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

                /*
                 * Wait for the current invocation to finish. The tasklets
                 * won't be scheduled anymore afterwards since we disabled
                 * the TBTT and PRE-TBTT timers.
                 */
                tasklet_kill(&rt2x00dev->tbtt_tasklet);
                tasklet_kill(&rt2x00dev->pretbtt_tasklet);

                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);

void rt2800mmio_queue_init(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        unsigned short txwi_size, rxwi_size;

        rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);

        switch (queue->qid) {
        case QID_RX:
                queue->limit = 128;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = RXD_DESC_SIZE;
                queue->winfo_size = rxwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                queue->limit = 64;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = TXD_DESC_SIZE;
                queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_BEACON:
                queue->limit = 8;
                queue->data_size = 0; /* No DMA required for beacons */
                queue->desc_size = TXD_DESC_SIZE;
                queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;

        case QID_ATIM:
                /* fallthrough */
        default:
                BUG();
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);

/*
 * Initialization functions.
 */
bool rt2800mmio_get_entry_state(struct queue_entry *entry)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        u32 word;

        if (entry->queue->qid == QID_RX) {
                word = rt2x00_desc_read(entry_priv->desc, 1);

                return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
        } else {
                word = rt2x00_desc_read(entry_priv->desc, 1);

                return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);

void rt2800mmio_clear_entry(struct queue_entry *entry)
{
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        u32 word;

        if (entry->queue->qid == QID_RX) {
                word = rt2x00_desc_read(entry_priv->desc, 0);
                rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
                rt2x00_desc_write(entry_priv->desc, 0, word);

                word = rt2x00_desc_read(entry_priv->desc, 1);
                rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
                rt2x00_desc_write(entry_priv->desc, 1, word);

                /*
                 * Set RX IDX in register to inform hardware that we have
                 * handled this entry and it is available for reuse again.
                 */
                rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
                                          entry->entry_idx);
        } else {
                word = rt2x00_desc_read(entry_priv->desc, 1);
                rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
                rt2x00_desc_write(entry_priv->desc, 1, word);

                /* If this was the last pending entry, stop the txstatus timer */
                if (entry->queue->length == 1)
                        hrtimer_cancel(&rt2x00dev->txstatus_timer);
        }
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);

int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
{
        struct queue_entry_priv_mmio *entry_priv;

        /*
         * Initialize registers.
         */
        entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
                                  rt2x00dev->tx[0].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);

        entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
                                  rt2x00dev->tx[1].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);

        entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
                                  rt2x00dev->tx[2].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);

        entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
                                  rt2x00dev->tx[3].limit);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);

        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);

        rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
        rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);

        entry_priv = rt2x00dev->rx->entries[0].priv_data;
        rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
                                  entry_priv->desc_dma);
        rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
                                  rt2x00dev->rx[0].limit);
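        /*
         * RX_CRX_IDX holds the last descriptor index the driver has handled;
         * starting it at limit - 1 marks the whole ring as owned by the
         * hardware.
         */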
        rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
                                  rt2x00dev->rx[0].limit - 1);
        rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);

        rt2800_disable_wpdma(rt2x00dev);

        rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);

int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
        u32 reg;

        /*
         * Reset DMA indexes
         */
        reg = rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
        rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
        rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

        rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
        rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

        if (rt2x00_is_pcie(rt2x00dev) &&
            (rt2x00_rt(rt2x00dev, RT3090) ||
             rt2x00_rt(rt2x00dev, RT3390) ||
             rt2x00_rt(rt2x00dev, RT3572) ||
             rt2x00_rt(rt2x00dev, RT3593) ||
             rt2x00_rt(rt2x00dev, RT5390) ||
             rt2x00_rt(rt2x00dev, RT5392) ||
             rt2x00_rt(rt2x00dev, RT5592))) {
                reg = rt2x00mmio_register_read(rt2x00dev, AUX_CTRL);
                rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
                rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
                rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
        }

        rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

        reg = 0;
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
        rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

        rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);

/*
 * Device state switch handlers.
 */
int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
{
        /* Wait for DMA, ignore error until we initialize queues. */
        rt2800_wait_wpdma_ready(rt2x00dev);

        if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
                return -EIO;

        return rt2800_enable_radio(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);

static void rt2800mmio_work_txdone(struct work_struct *work)
{
        struct rt2x00_dev *rt2x00dev =
            container_of(work, struct rt2x00_dev, txdone_work);

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return;

        while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
               rt2800_txstatus_timeout(rt2x00dev)) {

                tasklet_disable(&rt2x00dev->txstatus_tasklet);
                rt2800_txdone(rt2x00dev, UINT_MAX);
                rt2800_txdone_nostatus(rt2x00dev);
                tasklet_enable(&rt2x00dev->txstatus_tasklet);
        }

        if (rt2800_txstatus_pending(rt2x00dev))
                hrtimer_start(&rt2x00dev->txstatus_timer,
                              TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
}

static enum hrtimer_restart rt2800mmio_tx_sta_fifo_timeout(struct hrtimer *timer)
{
        struct rt2x00_dev *rt2x00dev =
            container_of(timer, struct rt2x00_dev, txstatus_timer);

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                goto out;

        if (!rt2800_txstatus_pending(rt2x00dev))
                goto out;

        rt2800mmio_fetch_txstatus(rt2x00dev);
        if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                tasklet_schedule(&rt2x00dev->txstatus_tasklet);
        else
                queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
out:
        return HRTIMER_NORESTART;
}

int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
{
        int retval;

        retval = rt2800_probe_hw(rt2x00dev);
        if (retval)
                return retval;

        /*
         * Set txstatus timer function.
         */
        rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;

        /*
         * Overwrite TX done handler
         */
        INIT_WORK(&rt2x00dev->txdone_work, rt2800mmio_work_txdone);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_probe_hw);

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2800 MMIO library");
MODULE_LICENSE("GPL");