linux/drivers/net/wireless/realtek/rtw88/pci.c
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/* Copyright(c) 2018-2019  Realtek Corporation
   3 */
   4
   5#include <linux/module.h>
   6#include <linux/pci.h>
   7#include "main.h"
   8#include "pci.h"
   9#include "reg.h"
  10#include "tx.h"
  11#include "rx.h"
  12#include "fw.h"
  13#include "ps.h"
  14#include "debug.h"
  15
  16static bool rtw_disable_msi;
  17static bool rtw_pci_disable_aspm;
  18module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
  19module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
  20MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
  21MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
  22
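     /* Per-queue TX buffer descriptor index registers: the low 16 bits hold
      * the host (write) index and the high 16 bits hold the hardware (read)
      * index, as used by rtw_pci_tx_kick_off_queue() and rtw_pci_tx_isr().
      */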
  23static u32 rtw_pci_tx_queue_idx_addr[] = {
  24        [RTW_TX_QUEUE_BK]       = RTK_PCI_TXBD_IDX_BKQ,
  25        [RTW_TX_QUEUE_BE]       = RTK_PCI_TXBD_IDX_BEQ,
  26        [RTW_TX_QUEUE_VI]       = RTK_PCI_TXBD_IDX_VIQ,
  27        [RTW_TX_QUEUE_VO]       = RTK_PCI_TXBD_IDX_VOQ,
  28        [RTW_TX_QUEUE_MGMT]     = RTK_PCI_TXBD_IDX_MGMTQ,
  29        [RTW_TX_QUEUE_HI0]      = RTK_PCI_TXBD_IDX_HI0Q,
  30        [RTW_TX_QUEUE_H2C]      = RTK_PCI_TXBD_IDX_H2CQ,
  31};
  32
  33static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
  34{
  35        switch (queue) {
  36        case RTW_TX_QUEUE_BCN:
  37                return TX_DESC_QSEL_BEACON;
  38        case RTW_TX_QUEUE_H2C:
  39                return TX_DESC_QSEL_H2C;
  40        case RTW_TX_QUEUE_MGMT:
  41                return TX_DESC_QSEL_MGMT;
  42        case RTW_TX_QUEUE_HI0:
  43                return TX_DESC_QSEL_HIGH;
  44        default:
  45                return skb->priority;
  46        }
   47}
  48
  49static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
  50{
  51        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  52
  53        return readb(rtwpci->mmap + addr);
  54}
  55
  56static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
  57{
  58        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  59
  60        return readw(rtwpci->mmap + addr);
  61}
  62
  63static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
  64{
  65        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  66
  67        return readl(rtwpci->mmap + addr);
  68}
  69
  70static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
  71{
  72        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  73
  74        writeb(val, rtwpci->mmap + addr);
  75}
  76
  77static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
  78{
  79        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  80
  81        writew(val, rtwpci->mmap + addr);
  82}
  83
  84static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
  85{
  86        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  87
  88        writel(val, rtwpci->mmap + addr);
  89}
  90
  91static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
  92{
  93        int offset = tx_ring->r.desc_size * idx;
  94
  95        return tx_ring->r.head + offset;
  96}
  97
  98static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
  99                                      struct rtw_pci_tx_ring *tx_ring)
 100{
 101        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 102        struct rtw_pci_tx_data *tx_data;
 103        struct sk_buff *skb, *tmp;
 104        dma_addr_t dma;
 105
  106        /* free every skb remaining in the tx list */
 107        skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
 108                __skb_unlink(skb, &tx_ring->queue);
 109                tx_data = rtw_pci_get_tx_data(skb);
 110                dma = tx_data->dma;
 111
 112                dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
 113                dev_kfree_skb_any(skb);
 114        }
 115}
 116
 117static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
 118                                 struct rtw_pci_tx_ring *tx_ring)
 119{
 120        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 121        u8 *head = tx_ring->r.head;
 122        u32 len = tx_ring->r.len;
 123        int ring_sz = len * tx_ring->r.desc_size;
 124
 125        rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
 126
 127        /* free the ring itself */
 128        dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
 129        tx_ring->r.head = NULL;
 130}
 131
 132static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
 133                                      struct rtw_pci_rx_ring *rx_ring)
 134{
 135        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 136        struct sk_buff *skb;
 137        int buf_sz = RTK_PCI_RX_BUF_SIZE;
 138        dma_addr_t dma;
 139        int i;
 140
 141        for (i = 0; i < rx_ring->r.len; i++) {
 142                skb = rx_ring->buf[i];
 143                if (!skb)
 144                        continue;
 145
 146                dma = *((dma_addr_t *)skb->cb);
 147                dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
 148                dev_kfree_skb(skb);
 149                rx_ring->buf[i] = NULL;
 150        }
 151}
 152
 153static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
 154                                 struct rtw_pci_rx_ring *rx_ring)
 155{
 156        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 157        u8 *head = rx_ring->r.head;
 158        int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
 159
 160        rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
 161
 162        dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
 163}
 164
 165static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
 166{
 167        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 168        struct rtw_pci_tx_ring *tx_ring;
 169        struct rtw_pci_rx_ring *rx_ring;
 170        int i;
 171
 172        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
 173                tx_ring = &rtwpci->tx_rings[i];
 174                rtw_pci_free_tx_ring(rtwdev, tx_ring);
 175        }
 176
 177        for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
 178                rx_ring = &rtwpci->rx_rings[i];
 179                rtw_pci_free_rx_ring(rtwdev, rx_ring);
 180        }
 181}
 182
 183static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
 184                                struct rtw_pci_tx_ring *tx_ring,
 185                                u8 desc_size, u32 len)
 186{
 187        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 188        int ring_sz = desc_size * len;
 189        dma_addr_t dma;
 190        u8 *head;
 191
 192        if (len > TRX_BD_IDX_MASK) {
 193                rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
 194                return -EINVAL;
 195        }
 196
 197        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
 198        if (!head) {
 199                rtw_err(rtwdev, "failed to allocate tx ring\n");
 200                return -ENOMEM;
 201        }
 202
 203        skb_queue_head_init(&tx_ring->queue);
 204        tx_ring->r.head = head;
 205        tx_ring->r.dma = dma;
 206        tx_ring->r.len = len;
 207        tx_ring->r.desc_size = desc_size;
 208        tx_ring->r.wp = 0;
 209        tx_ring->r.rp = 0;
 210
 211        return 0;
 212}
 213
 214static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
 215                                 struct rtw_pci_rx_ring *rx_ring,
 216                                 u32 idx, u32 desc_sz)
 217{
 218        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 219        struct rtw_pci_rx_buffer_desc *buf_desc;
 220        int buf_sz = RTK_PCI_RX_BUF_SIZE;
 221        dma_addr_t dma;
 222
 223        if (!skb)
 224                return -EINVAL;
 225
 226        dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
 227        if (dma_mapping_error(&pdev->dev, dma))
 228                return -EBUSY;
 229
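             /* stash the DMA address in skb->cb so it can be synced and
              * unmapped later (see rtw_pci_free_rx_ring_skbs and
              * rtw_pci_rx_napi)
              */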
 230        *((dma_addr_t *)skb->cb) = dma;
 231        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
 232                                                     idx * desc_sz);
 233        memset(buf_desc, 0, sizeof(*buf_desc));
 234        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
 235        buf_desc->dma = cpu_to_le32(dma);
 236
 237        return 0;
 238}
 239
 240static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
 241                                        struct rtw_pci_rx_ring *rx_ring,
 242                                        u32 idx, u32 desc_sz)
 243{
 244        struct device *dev = rtwdev->dev;
 245        struct rtw_pci_rx_buffer_desc *buf_desc;
 246        int buf_sz = RTK_PCI_RX_BUF_SIZE;
 247
 248        dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
 249
 250        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
 251                                                     idx * desc_sz);
 252        memset(buf_desc, 0, sizeof(*buf_desc));
 253        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
 254        buf_desc->dma = cpu_to_le32(dma);
 255}
 256
 257static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
 258                                struct rtw_pci_rx_ring *rx_ring,
 259                                u8 desc_size, u32 len)
 260{
 261        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 262        struct sk_buff *skb = NULL;
 263        dma_addr_t dma;
 264        u8 *head;
 265        int ring_sz = desc_size * len;
 266        int buf_sz = RTK_PCI_RX_BUF_SIZE;
 267        int i, allocated;
 268        int ret = 0;
 269
 270        if (len > TRX_BD_IDX_MASK) {
 271                rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len);
 272                return -EINVAL;
 273        }
 274
 275        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
 276        if (!head) {
 277                rtw_err(rtwdev, "failed to allocate rx ring\n");
 278                return -ENOMEM;
 279        }
 280        rx_ring->r.head = head;
 281
 282        for (i = 0; i < len; i++) {
 283                skb = dev_alloc_skb(buf_sz);
 284                if (!skb) {
 285                        allocated = i;
 286                        ret = -ENOMEM;
 287                        goto err_out;
 288                }
 289
 290                memset(skb->data, 0, buf_sz);
 291                rx_ring->buf[i] = skb;
 292                ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
 293                if (ret) {
 294                        allocated = i;
 295                        dev_kfree_skb_any(skb);
 296                        goto err_out;
 297                }
 298        }
 299
 300        rx_ring->r.dma = dma;
 301        rx_ring->r.len = len;
 302        rx_ring->r.desc_size = desc_size;
 303        rx_ring->r.wp = 0;
 304        rx_ring->r.rp = 0;
 305
 306        return 0;
 307
 308err_out:
 309        for (i = 0; i < allocated; i++) {
 310                skb = rx_ring->buf[i];
 311                if (!skb)
 312                        continue;
 313                dma = *((dma_addr_t *)skb->cb);
 314                dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
 315                dev_kfree_skb_any(skb);
 316                rx_ring->buf[i] = NULL;
 317        }
 318        dma_free_coherent(&pdev->dev, ring_sz, head, dma);
 319
 320        rtw_err(rtwdev, "failed to init rx buffer\n");
 321
 322        return ret;
 323}
 324
 325static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
 326{
 327        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 328        struct rtw_pci_tx_ring *tx_ring;
 329        struct rtw_pci_rx_ring *rx_ring;
 330        struct rtw_chip_info *chip = rtwdev->chip;
 331        int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
 332        int tx_desc_size, rx_desc_size;
 333        u32 len;
 334        int ret;
 335
 336        tx_desc_size = chip->tx_buf_desc_sz;
 337
 338        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
 339                tx_ring = &rtwpci->tx_rings[i];
 340                len = max_num_of_tx_queue(i);
 341                ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
 342                if (ret)
 343                        goto out;
 344        }
 345
 346        rx_desc_size = chip->rx_buf_desc_sz;
 347
 348        for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
 349                rx_ring = &rtwpci->rx_rings[j];
 350                ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
 351                                           RTK_MAX_RX_DESC_NUM);
 352                if (ret)
 353                        goto out;
 354        }
 355
 356        return 0;
 357
 358out:
 359        tx_alloced = i;
 360        for (i = 0; i < tx_alloced; i++) {
 361                tx_ring = &rtwpci->tx_rings[i];
 362                rtw_pci_free_tx_ring(rtwdev, tx_ring);
 363        }
 364
 365        rx_alloced = j;
 366        for (j = 0; j < rx_alloced; j++) {
 367                rx_ring = &rtwpci->rx_rings[j];
 368                rtw_pci_free_rx_ring(rtwdev, rx_ring);
 369        }
 370
 371        return ret;
 372}
 373
 374static void rtw_pci_deinit(struct rtw_dev *rtwdev)
 375{
 376        rtw_pci_free_trx_ring(rtwdev);
 377}
 378
 379static int rtw_pci_init(struct rtw_dev *rtwdev)
 380{
 381        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 382        int ret = 0;
 383
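             /* irq_mask[0], [1] and [3] are programmed into the HIMR0, HIMR1
              * and HIMR3 interrupt mask registers by rtw_pci_enable_interrupt()
              */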
 384        rtwpci->irq_mask[0] = IMR_HIGHDOK |
 385                              IMR_MGNTDOK |
 386                              IMR_BKDOK |
 387                              IMR_BEDOK |
 388                              IMR_VIDOK |
 389                              IMR_VODOK |
 390                              IMR_ROK |
 391                              IMR_BCNDMAINT_E |
 392                              IMR_C2HCMD |
 393                              0;
 394        rtwpci->irq_mask[1] = IMR_TXFOVW |
 395                              0;
 396        rtwpci->irq_mask[3] = IMR_H2CDOK |
 397                              0;
 398        spin_lock_init(&rtwpci->irq_lock);
 399        spin_lock_init(&rtwpci->hwirq_lock);
 400        ret = rtw_pci_init_trx_ring(rtwdev);
 401
 402        return ret;
 403}
 404
 405static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
 406{
 407        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 408        u32 len;
 409        u8 tmp;
 410        dma_addr_t dma;
 411
 412        tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
 413        rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
 414
 415        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
 416        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
 417
 418        if (!rtw_chip_wcpu_11n(rtwdev)) {
 419                len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
 420                dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
 421                rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
 422                rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
 423                rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
 424                rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
 425        }
 426
 427        len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
 428        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
 429        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
 430        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
 431        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
 432        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
 433
 434        len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
 435        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
 436        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
 437        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
 438        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
 439        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
 440
 441        len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
 442        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
 443        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
 444        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
 445        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
 446        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
 447
 448        len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
 449        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
 450        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
 451        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
 452        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
 453        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
 454
 455        len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
 456        dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
 457        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
 458        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
 459        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
 460        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
 461
 462        len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
 463        dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
 464        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
 465        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
 466        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
 467        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
 468
 469        len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
 470        dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
 471        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
 472        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
 473        rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
 474        rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
 475
  476        /* reset read/write pointers */
 477        rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
 478
 479        /* reset H2C Queue index in a single write */
 480        if (rtw_chip_wcpu_11ac(rtwdev))
 481                rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
 482                                BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
 483}
 484
 485static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
 486{
 487        rtw_pci_reset_buf_desc(rtwdev);
 488}
 489
 490static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
 491                                     struct rtw_pci *rtwpci, bool exclude_rx)
 492{
 493        unsigned long flags;
 494        u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;
 495
 496        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
 497
 498        rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
 499        rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
 500        if (rtw_chip_wcpu_11ac(rtwdev))
 501                rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
 502
 503        rtwpci->irq_enabled = true;
 504
 505        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
 506}
 507
 508static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
 509                                      struct rtw_pci *rtwpci)
 510{
 511        unsigned long flags;
 512
 513        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
 514
 515        if (!rtwpci->irq_enabled)
 516                goto out;
 517
 518        rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
 519        rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
 520        if (rtw_chip_wcpu_11ac(rtwdev))
 521                rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
 522
 523        rtwpci->irq_enabled = false;
 524
 525out:
 526        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
 527}
 528
 529static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
 530{
 531        /* reset dma and rx tag */
 532        rtw_write32_set(rtwdev, RTK_PCI_CTRL,
 533                        BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
 534        rtwpci->rx_tag = 0;
 535}
 536
 537static int rtw_pci_setup(struct rtw_dev *rtwdev)
 538{
 539        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 540
 541        rtw_pci_reset_trx_ring(rtwdev);
 542        rtw_pci_dma_reset(rtwdev, rtwpci);
 543
 544        return 0;
 545}
 546
 547static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
 548{
 549        struct rtw_pci_tx_ring *tx_ring;
 550        u8 queue;
 551
 552        rtw_pci_reset_trx_ring(rtwdev);
 553        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
 554                tx_ring = &rtwpci->tx_rings[queue];
 555                rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
 556        }
 557}
 558
 559static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
 560{
 561        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 562
 563        if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
 564                return;
 565
 566        napi_enable(&rtwpci->napi);
 567}
 568
 569static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
 570{
 571        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 572
 573        if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
 574                return;
 575
 576        napi_synchronize(&rtwpci->napi);
 577        napi_disable(&rtwpci->napi);
 578}
 579
 580static int rtw_pci_start(struct rtw_dev *rtwdev)
 581{
 582        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 583
 584        rtw_pci_napi_start(rtwdev);
 585
 586        spin_lock_bh(&rtwpci->irq_lock);
 587        rtwpci->running = true;
 588        rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
 589        spin_unlock_bh(&rtwpci->irq_lock);
 590
 591        return 0;
 592}
 593
 594static void rtw_pci_stop(struct rtw_dev *rtwdev)
 595{
 596        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 597        struct pci_dev *pdev = rtwpci->pdev;
 598
 599        spin_lock_bh(&rtwpci->irq_lock);
 600        rtwpci->running = false;
 601        rtw_pci_disable_interrupt(rtwdev, rtwpci);
 602        spin_unlock_bh(&rtwpci->irq_lock);
 603
 604        synchronize_irq(pdev->irq);
 605        rtw_pci_napi_stop(rtwdev);
 606
 607        spin_lock_bh(&rtwpci->irq_lock);
 608        rtw_pci_dma_release(rtwdev, rtwpci);
 609        spin_unlock_bh(&rtwpci->irq_lock);
 610}
 611
 612static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
 613{
 614        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 615        struct rtw_pci_tx_ring *tx_ring;
 616        bool tx_empty = true;
 617        u8 queue;
 618
 619        lockdep_assert_held(&rtwpci->irq_lock);
 620
  621        /* deep PS is not allowed while TX DMA is in progress */
 622        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
  623                /* the BCN queue holds reserved pages and has no DMA interrupt;
  624                 * the H2C queue is managed by the firmware
  625                 */
 626                if (queue == RTW_TX_QUEUE_BCN ||
 627                    queue == RTW_TX_QUEUE_H2C)
 628                        continue;
 629
 630                tx_ring = &rtwpci->tx_rings[queue];
 631
  632                /* check if any skb is still being DMAed */
 633                if (skb_queue_len(&tx_ring->queue)) {
 634                        tx_empty = false;
 635                        break;
 636                }
 637        }
 638
 639        if (!tx_empty) {
 640                rtw_dbg(rtwdev, RTW_DBG_PS,
 641                        "TX path not empty, cannot enter deep power save state\n");
 642                return;
 643        }
 644
 645        set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
 646        rtw_power_mode_change(rtwdev, true);
 647}
 648
 649static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
 650{
 651        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 652
 653        lockdep_assert_held(&rtwpci->irq_lock);
 654
 655        if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
 656                rtw_power_mode_change(rtwdev, false);
 657}
 658
 659static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
 660{
 661        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 662
 663        spin_lock_bh(&rtwpci->irq_lock);
 664
 665        if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
 666                rtw_pci_deep_ps_enter(rtwdev);
 667
 668        if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
 669                rtw_pci_deep_ps_leave(rtwdev);
 670
 671        spin_unlock_bh(&rtwpci->irq_lock);
 672}
 673
 674static u8 ac_to_hwq[] = {
 675        [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
 676        [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
 677        [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
 678        [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
 679};
 680
 681static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);
 682
 683static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
 684{
 685        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 686        __le16 fc = hdr->frame_control;
 687        u8 q_mapping = skb_get_queue_mapping(skb);
 688        u8 queue;
 689
 690        if (unlikely(ieee80211_is_beacon(fc)))
 691                queue = RTW_TX_QUEUE_BCN;
 692        else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
 693                queue = RTW_TX_QUEUE_MGMT;
 694        else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
 695                queue = ac_to_hwq[IEEE80211_AC_BE];
 696        else
 697                queue = ac_to_hwq[q_mapping];
 698
 699        return queue;
 700}
 701
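     /* Release the reserved-page skb previously queued on the BCN ring, if any,
      * and unmap its DMA buffer before a new one is queued, so the BCN ring
      * keeps only the most recent reserved page.
      */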
 702static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
 703                                      struct rtw_pci_tx_ring *ring)
 704{
 705        struct sk_buff *prev = skb_dequeue(&ring->queue);
 706        struct rtw_pci_tx_data *tx_data;
 707        dma_addr_t dma;
 708
 709        if (!prev)
 710                return;
 711
 712        tx_data = rtw_pci_get_tx_data(prev);
 713        dma = tx_data->dma;
 714        dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
 715        dev_kfree_skb_any(prev);
 716}
 717
 718static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
 719                              struct rtw_pci_rx_ring *rx_ring,
 720                              u32 idx)
 721{
 722        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 723        struct rtw_chip_info *chip = rtwdev->chip;
 724        struct rtw_pci_rx_buffer_desc *buf_desc;
 725        u32 desc_sz = chip->rx_buf_desc_sz;
 726        u16 total_pkt_size;
 727
 728        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
 729                                                     idx * desc_sz);
 730        total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
 731
 732        /* rx tag mismatch, throw a warning */
 733        if (total_pkt_size != rtwpci->rx_tag)
 734                rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
 735
 736        rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
 737}
 738
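     /* The hardware (read) index is kept in the upper 16 bits of the TXBD index
      * register, so read the high half-word directly.
      */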
 739static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
 740{
 741        u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
 742        u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);
 743
 744        return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
 745}
 746
 747static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
 748{
 749        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 750        struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
 751        u32 cur_rp;
 752        u8 i;
 753
  754        /* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a
  755         * bit dynamic, it is hard to define a reasonable fixed total timeout
  756         * for the read_poll_timeout* helpers. Instead, bound the number of
  757         * polls, so a simple for loop with udelay is used here.
  758         */
 759        for (i = 0; i < 30; i++) {
 760                cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
 761                if (cur_rp == ring->r.wp)
 762                        return;
 763
 764                udelay(1);
 765        }
 766
 767        if (!drop)
  768                rtw_warn(rtwdev, "timed out flushing pci tx ring[%d]\n", pci_q);
 769}
 770
 771static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
 772                                   bool drop)
 773{
 774        u8 q;
 775
 776        for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
  777                /* It may not be necessary to flush the BCN and H2C tx queues. */
 778                if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C)
 779                        continue;
 780
 781                if (pci_queues & BIT(q))
 782                        __pci_flush_queue(rtwdev, q, drop);
 783        }
 784}
 785
 786static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
 787{
 788        u32 pci_queues = 0;
 789        u8 i;
 790
  791        /* If a flush is requested for all of the hardware queues,
  792         * flush all of the pci queues.
  793         */
 794        if (queues == BIT(rtwdev->hw->queues) - 1) {
 795                pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
 796        } else {
 797                for (i = 0; i < rtwdev->hw->queues; i++)
 798                        if (queues & BIT(i))
 799                                pci_queues |= BIT(ac_to_hwq[i]);
 800        }
 801
 802        __rtw_pci_flush_queues(rtwdev, pci_queues, drop);
 803}
 804
 805static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
 806{
 807        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 808        struct rtw_pci_tx_ring *ring;
 809        u32 bd_idx;
 810
 811        ring = &rtwpci->tx_rings[queue];
 812        bd_idx = rtw_pci_tx_queue_idx_addr[queue];
 813
 814        spin_lock_bh(&rtwpci->irq_lock);
 815        rtw_pci_deep_ps_leave(rtwdev);
 816        rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
 817        spin_unlock_bh(&rtwpci->irq_lock);
 818}
 819
 820static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
 821{
 822        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 823        u8 queue;
 824
 825        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
 826                if (test_and_clear_bit(queue, rtwpci->tx_queued))
 827                        rtw_pci_tx_kick_off_queue(rtwdev, queue);
 828}
 829
 830static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
 831                                 struct rtw_tx_pkt_info *pkt_info,
 832                                 struct sk_buff *skb, u8 queue)
 833{
 834        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 835        struct rtw_chip_info *chip = rtwdev->chip;
 836        struct rtw_pci_tx_ring *ring;
 837        struct rtw_pci_tx_data *tx_data;
 838        dma_addr_t dma;
 839        u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
 840        u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
 841        u32 size;
 842        u32 psb_len;
 843        u8 *pkt_desc;
 844        struct rtw_pci_tx_buffer_desc *buf_desc;
 845
 846        ring = &rtwpci->tx_rings[queue];
 847
 848        size = skb->len;
 849
 850        if (queue == RTW_TX_QUEUE_BCN)
 851                rtw_pci_release_rsvd_page(rtwpci, ring);
 852        else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
 853                return -ENOSPC;
 854
 855        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
 856        memset(pkt_desc, 0, tx_pkt_desc_sz);
 857        pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
 858        rtw_tx_fill_tx_desc(pkt_info, skb);
 859        dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
 860                             DMA_TO_DEVICE);
 861        if (dma_mapping_error(&rtwpci->pdev->dev, dma))
 862                return -EBUSY;
 863
  864        /* the DMA mapping is set up after this point; there is no way back */
 865        buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
 866        memset(buf_desc, 0, tx_buf_desc_sz);
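             /* psb_len is the packet size in 128-byte blocks (rounded up); the
              * BCN queue additionally sets the descriptor OWN bit
              */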
 867        psb_len = (skb->len - 1) / 128 + 1;
 868        if (queue == RTW_TX_QUEUE_BCN)
 869                psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
 870
 871        buf_desc[0].psb_len = cpu_to_le16(psb_len);
 872        buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
 873        buf_desc[0].dma = cpu_to_le32(dma);
 874        buf_desc[1].buf_size = cpu_to_le16(size);
 875        buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
 876
 877        tx_data = rtw_pci_get_tx_data(skb);
 878        tx_data->dma = dma;
 879        tx_data->sn = pkt_info->sn;
 880
 881        spin_lock_bh(&rtwpci->irq_lock);
 882
 883        skb_queue_tail(&ring->queue, skb);
 884
 885        if (queue == RTW_TX_QUEUE_BCN)
 886                goto out_unlock;
 887
 888        /* update write-index, and kick it off later */
 889        set_bit(queue, rtwpci->tx_queued);
 890        if (++ring->r.wp >= ring->r.len)
 891                ring->r.wp = 0;
 892
 893out_unlock:
 894        spin_unlock_bh(&rtwpci->irq_lock);
 895
 896        return 0;
 897}
 898
 899static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
 900                                        u32 size)
 901{
 902        struct sk_buff *skb;
 903        struct rtw_tx_pkt_info pkt_info = {0};
 904        u8 reg_bcn_work;
 905        int ret;
 906
 907        skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
 908        if (!skb)
 909                return -ENOMEM;
 910
 911        ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
 912        if (ret) {
 913                rtw_err(rtwdev, "failed to write rsvd page data\n");
 914                return ret;
 915        }
 916
 917        /* reserved pages go through beacon queue */
 918        reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
 919        reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
 920        rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
 921
 922        return 0;
 923}
 924
 925static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
 926{
 927        struct sk_buff *skb;
 928        struct rtw_tx_pkt_info pkt_info = {0};
 929        int ret;
 930
 931        skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
 932        if (!skb)
 933                return -ENOMEM;
 934
 935        ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
 936        if (ret) {
 937                rtw_err(rtwdev, "failed to write h2c data\n");
 938                return ret;
 939        }
 940
 941        rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
 942
 943        return 0;
 944}
 945
 946static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
 947                            struct rtw_tx_pkt_info *pkt_info,
 948                            struct sk_buff *skb)
 949{
 950        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 951        struct rtw_pci_tx_ring *ring;
 952        u8 queue = rtw_hw_queue_mapping(skb);
 953        int ret;
 954
 955        ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
 956        if (ret)
 957                return ret;
 958
 959        ring = &rtwpci->tx_rings[queue];
 960        spin_lock_bh(&rtwpci->irq_lock);
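             /* keep some headroom: stop the mac80211 queue when fewer than two
              * free descriptors remain, so later writes do not hit -ENOSPC
              */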
 961        if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
 962                ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
 963                ring->queue_stopped = true;
 964        }
 965        spin_unlock_bh(&rtwpci->irq_lock);
 966
 967        return 0;
 968}
 969
 970static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 971                           u8 hw_queue)
 972{
 973        struct ieee80211_hw *hw = rtwdev->hw;
 974        struct ieee80211_tx_info *info;
 975        struct rtw_pci_tx_ring *ring;
 976        struct rtw_pci_tx_data *tx_data;
 977        struct sk_buff *skb;
 978        u32 count;
 979        u32 bd_idx_addr;
 980        u32 bd_idx, cur_rp, rp_idx;
 981        u16 q_map;
 982
 983        ring = &rtwpci->tx_rings[hw_queue];
 984
 985        bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
 986        bd_idx = rtw_read32(rtwdev, bd_idx_addr);
 987        cur_rp = bd_idx >> 16;
 988        cur_rp &= TRX_BD_IDX_MASK;
 989        rp_idx = ring->r.rp;
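             /* number of descriptors the hardware has completed since the last
              * interrupt, accounting for wrap-around of the ring
              */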
 990        if (cur_rp >= ring->r.rp)
 991                count = cur_rp - ring->r.rp;
 992        else
 993                count = ring->r.len - (ring->r.rp - cur_rp);
 994
 995        while (count--) {
 996                skb = skb_dequeue(&ring->queue);
 997                if (!skb) {
 998                        rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
 999                                count, hw_queue, bd_idx, ring->r.rp, cur_rp);
1000                        break;
1001                }
1002                tx_data = rtw_pci_get_tx_data(skb);
1003                dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
1004                                 DMA_TO_DEVICE);
1005
1006                /* just free command packets from host to card */
1007                if (hw_queue == RTW_TX_QUEUE_H2C) {
1008                        dev_kfree_skb_irq(skb);
1009                        continue;
1010                }
1011
1012                if (ring->queue_stopped &&
1013                    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
1014                        q_map = skb_get_queue_mapping(skb);
1015                        ieee80211_wake_queue(hw, q_map);
1016                        ring->queue_stopped = false;
1017                }
1018
1019                if (++rp_idx >= ring->r.len)
1020                        rp_idx = 0;
1021
1022                skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
1023
1024                info = IEEE80211_SKB_CB(skb);
1025
1026                /* enqueue to wait for tx report */
1027                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
1028                        rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
1029                        continue;
1030                }
1031
 1032                /* report ACK for all others so they are not marked as dropped */
1033                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1034                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
1035                else
1036                        info->flags |= IEEE80211_TX_STAT_ACK;
1037
1038                ieee80211_tx_info_clear_status(info);
1039                ieee80211_tx_status_irqsafe(hw, skb);
1040        }
1041
1042        ring->r.rp = cur_rp;
1043}
1044
1045static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
1046{
1047        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1048        struct napi_struct *napi = &rtwpci->napi;
1049
1050        napi_schedule(napi);
1051}
1052
1053static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
1054                                     struct rtw_pci *rtwpci)
1055{
1056        struct rtw_pci_rx_ring *ring;
1057        int count = 0;
1058        u32 tmp, cur_wp;
1059
1060        ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1061        tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
1062        cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
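             /* number of RX buffers the hardware has filled since the last pass,
              * accounting for wrap-around of the ring
              */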
1063        if (cur_wp >= ring->r.wp)
1064                count = cur_wp - ring->r.wp;
1065        else
1066                count = ring->r.len - (ring->r.wp - cur_wp);
1067
1068        return count;
1069}
1070
1071static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
1072                           u8 hw_queue, u32 limit)
1073{
1074        struct rtw_chip_info *chip = rtwdev->chip;
1075        struct napi_struct *napi = &rtwpci->napi;
1076        struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1077        struct rtw_rx_pkt_stat pkt_stat;
1078        struct ieee80211_rx_status rx_status;
1079        struct sk_buff *skb, *new;
1080        u32 cur_rp = ring->r.rp;
1081        u32 count, rx_done = 0;
1082        u32 pkt_offset;
1083        u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
1084        u32 buf_desc_sz = chip->rx_buf_desc_sz;
1085        u32 new_len;
1086        u8 *rx_desc;
1087        dma_addr_t dma;
1088
1089        count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
1090        count = min(count, limit);
1091
1092        while (count--) {
1093                rtw_pci_dma_check(rtwdev, ring, cur_rp);
1094                skb = ring->buf[cur_rp];
1095                dma = *((dma_addr_t *)skb->cb);
1096                dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
1097                                        DMA_FROM_DEVICE);
1098                rx_desc = skb->data;
1099                chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
1100
1101                /* offset from rx_desc to payload */
1102                pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
1103                             pkt_stat.shift;
1104
1105                /* allocate a new skb for this frame,
1106                 * discard the frame if none available
1107                 */
1108                new_len = pkt_stat.pkt_len + pkt_offset;
1109                new = dev_alloc_skb(new_len);
1110                if (WARN_ONCE(!new, "rx routine starvation\n"))
1111                        goto next_rp;
1112
 1113                /* copy the DMA'd data, including the rx_desc, into the new skb */
1114                skb_put_data(new, skb->data, new_len);
1115
1116                if (pkt_stat.is_c2h) {
1117                        rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
1118                } else {
1119                        /* remove rx_desc */
1120                        skb_pull(new, pkt_offset);
1121
1122                        rtw_rx_stats(rtwdev, pkt_stat.vif, new);
1123                        memcpy(new->cb, &rx_status, sizeof(rx_status));
1124                        ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
1125                        rx_done++;
1126                }
1127
1128next_rp:
1129                /* new skb delivered to mac80211, re-enable original skb DMA */
1130                rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
1131                                            buf_desc_sz);
1132
1133                /* host read next element in ring */
1134                if (++cur_rp >= ring->r.len)
1135                        cur_rp = 0;
1136        }
1137
1138        ring->r.rp = cur_rp;
 1139        /* 'rp', the last position we have read, is treated as the previous
 1140         * position of 'wp', which is used to calculate 'count' next time.
 1141         */
1142        ring->r.wp = cur_rp;
1143        rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
1144
1145        return rx_done;
1146}
1147
1148static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
1149                                   struct rtw_pci *rtwpci, u32 *irq_status)
1150{
1151        unsigned long flags;
1152
1153        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
1154
1155        irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
1156        irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
1157        if (rtw_chip_wcpu_11ac(rtwdev))
1158                irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
1159        else
1160                irq_status[3] = 0;
1161        irq_status[0] &= rtwpci->irq_mask[0];
1162        irq_status[1] &= rtwpci->irq_mask[1];
1163        irq_status[3] &= rtwpci->irq_mask[3];
1164        rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
1165        rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
1166        if (rtw_chip_wcpu_11ac(rtwdev))
1167                rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
1168
1169        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
1170}
1171
1172static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
1173{
1174        struct rtw_dev *rtwdev = dev;
1175        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1176
 1177        /* Disable the RTW PCI interrupt to avoid further interrupts before the
 1178         * threaded handler has finished.
 1179         *
 1180         * Disabling HIMR here also avoids a new HISR flag being raised before
 1181         * the HISRs have been write-1-cleared for MSI. If not all of the HISRs
 1182         * are cleared, the edge-triggered interrupt will not be generated when
 1183         * a new HISR flag is set.
 1184         */
1185        rtw_pci_disable_interrupt(rtwdev, rtwpci);
1186
1187        return IRQ_WAKE_THREAD;
1188}
1189
1190static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
1191{
1192        struct rtw_dev *rtwdev = dev;
1193        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1194        u32 irq_status[4];
1195        bool rx = false;
1196
1197        spin_lock_bh(&rtwpci->irq_lock);
1198        rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
1199
1200        if (irq_status[0] & IMR_MGNTDOK)
1201                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
1202        if (irq_status[0] & IMR_HIGHDOK)
1203                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
1204        if (irq_status[0] & IMR_BEDOK)
1205                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
1206        if (irq_status[0] & IMR_BKDOK)
1207                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
1208        if (irq_status[0] & IMR_VODOK)
1209                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
1210        if (irq_status[0] & IMR_VIDOK)
1211                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
1212        if (irq_status[3] & IMR_H2CDOK)
1213                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
1214        if (irq_status[0] & IMR_ROK) {
1215                rtw_pci_rx_isr(rtwdev);
1216                rx = true;
1217        }
1218        if (unlikely(irq_status[0] & IMR_C2HCMD))
1219                rtw_fw_c2h_cmd_isr(rtwdev);
1220
1221        /* all of the jobs for this interrupt have been done */
1222        if (rtwpci->running)
1223                rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
1224        spin_unlock_bh(&rtwpci->irq_lock);
1225
1226        return IRQ_HANDLED;
1227}
1228
1229static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
1230                              struct pci_dev *pdev)
1231{
1232        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1233        unsigned long len;
1234        u8 bar_id = 2;
1235        int ret;
1236
1237        ret = pci_request_regions(pdev, KBUILD_MODNAME);
1238        if (ret) {
1239                rtw_err(rtwdev, "failed to request pci regions\n");
1240                return ret;
1241        }
1242
1243        len = pci_resource_len(pdev, bar_id);
1244        rtwpci->mmap = pci_iomap(pdev, bar_id, len);
1245        if (!rtwpci->mmap) {
1246                pci_release_regions(pdev);
1247                rtw_err(rtwdev, "failed to map pci memory\n");
1248                return -ENOMEM;
1249        }
1250
1251        return 0;
1252}
1253
1254static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
1255                                 struct pci_dev *pdev)
1256{
1257        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1258
1259        if (rtwpci->mmap) {
1260                pci_iounmap(pdev, rtwpci->mmap);
1261                pci_release_regions(pdev);
1262        }
1263}
1264
1265static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
1266{
1267        u16 write_addr;
1268        u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
1269        u8 flag;
1270        u8 cnt;
1271
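             /* DBI accesses are dword aligned: 'remainder' selects the byte
              * lane within the dword, and the matching write-enable bit is set
              * in the address word
              */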
1272        write_addr = addr & BITS_DBI_ADDR_MASK;
1273        write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
1274        rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
1275        rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
1276        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
1277
1278        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1279                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1280                if (flag == 0)
1281                        return;
1282
1283                udelay(10);
1284        }
1285
1286        WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
1287}
1288
1289static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
1290{
1291        u16 read_addr = addr & BITS_DBI_ADDR_MASK;
1292        u8 flag;
1293        u8 cnt;
1294
1295        rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
1296        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
1297
1298        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1299                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1300                if (flag == 0) {
1301                        read_addr = REG_DBI_RDATA_V1 + (addr & 3);
1302                        *value = rtw_read8(rtwdev, read_addr);
1303                        return 0;
1304                }
1305
1306                udelay(10);
1307        }
1308
1309        WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
1310        return -EIO;
1311}
1312
1313static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
1314{
1315        u8 page;
1316        u8 wflag;
1317        u8 cnt;
1318
1319        rtw_write16(rtwdev, REG_MDIO_V1, data);
1320
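             /* MDIO registers are paged: addresses at or above RTW_PCI_MDIO_PG_SZ
              * fall into page 1, offset by the Gen1 or Gen2 page base
              */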
1321        page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
1322        page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
1323        rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
1324        rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
1325        rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
1326
1327        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1328                wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
1329                                        BIT_MDIO_WFLAG_V1);
1330                if (wflag == 0)
1331                        return;
1332
1333                udelay(10);
1334        }
1335
1336        WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
1337}
1338
1339static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
1340{
1341        u8 value;
1342        int ret;
1343
1344        if (rtw_pci_disable_aspm)
1345                return;
1346
1347        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1348        if (ret) {
1349                rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1350                return;
1351        }
1352
1353        if (enable)
1354                value |= BIT_CLKREQ_SW_EN;
1355        else
1356                value &= ~BIT_CLKREQ_SW_EN;
1357
1358        rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1359}
1360
1361static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
1362{
1363        u8 value;
1364        int ret;
1365
1366        if (rtw_pci_disable_aspm)
1367                return;
1368
1369        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1370        if (ret) {
1371                rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
1372                return;
1373        }
1374
1375        if (enable)
1376                value |= BIT_L1_SW_EN;
1377        else
1378                value &= ~BIT_L1_SW_EN;
1379
1380        rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1381}
1382
1383static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
1384{
1385        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1386
 1387        /* Like CLKREQ, ASPM is also implemented by two HW modules, and can
 1388         * only be enabled when the host supports it.
 1389         *
 1390         * ASPM should only be enabled when the driver/firmware enters power
 1391         * save mode and traffic is light, because we have experienced
 1392         * interoperability issues where the link tends to enter L1 on the
 1393         * fly even while the driver is sustaining high throughput. This is
 1394         * probably because the ASPM behavior varies slightly between
 1395         * different SoCs.
 1396         */
1397        if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
1398                rtw_pci_aspm_set(rtwdev, enter);
1399}
1400
1401static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
1402{
1403        struct rtw_chip_info *chip = rtwdev->chip;
1404        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1405        struct pci_dev *pdev = rtwpci->pdev;
1406        u16 link_ctrl;
1407        int ret;
1408
 1409        /* RTL8822CE has REFCLK auto calibration enabled, so it does not need
 1410         * extra clock delay to cover the REFCLK timing gap.
 1411         */
1412        if (chip->id == RTW_CHIP_TYPE_8822C)
1413                rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
1414
 1415        /* Although the link control register can be set through the standard
 1416         * PCIe configuration space, by Realtek's design the driver should
 1417         * check whether the host supports CLKREQ/ASPM before enabling the
 1418         * HW module.
 1419         *
 1420         * These functions are implemented by two associated HW modules: one
 1421         * accesses the PCIe configuration space to follow the host settings,
 1422         * and the other carries out the actual CLKREQ/ASPM mechanism and is
 1423         * disabled by default, because the host may not support it, and
 1424         * wrong settings (e.g. CLKREQ# not bi-directional) could lead to
 1425         * losing the device if the HW misbehaves on the link.
 1426         *
 1427         * Hence the driver should first check that the PCIe configuration
 1428         * space is synced and enabled, and only then turn on the other
 1429         * module that actually implements the mechanism.
 1430         */
1431        ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
1432        if (ret) {
1433                rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
1434                return;
1435        }
1436
1437        if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
1438                rtw_pci_clkreq_set(rtwdev, true);
1439
1440        rtwpci->link_ctrl = link_ctrl;
1441}
1442
1443static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
1444{
1445        struct rtw_chip_info *chip = rtwdev->chip;
1446
1447        switch (chip->id) {
1448        case RTW_CHIP_TYPE_8822C:
1449                if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1450                        rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
1451                                         BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1452                break;
1453        default:
1454                break;
1455        }
1456}
1457
1458static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1459{
1460        struct rtw_chip_info *chip = rtwdev->chip;
1461        const struct rtw_intf_phy_para *para;
1462        u16 cut;
1463        u16 value;
1464        u16 offset;
1465        int i;
1466
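             /* represent the chip cut as a single bit so it can be matched
              * against each parameter's cut_mask
              */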
1467        cut = BIT(0) << rtwdev->hal.cut_version;
1468
1469        for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1470                para = &chip->intf_table->gen1_para[i];
1471                if (!(para->cut_mask & cut))
1472                        continue;
1473                if (para->offset == 0xffff)
1474                        break;
1475                offset = para->offset;
1476                value = para->value;
1477                if (para->ip_sel == RTW_IP_SEL_PHY)
1478                        rtw_mdio_write(rtwdev, offset, value, true);
1479                else
1480                        rtw_dbi_write8(rtwdev, offset, value);
1481        }
1482
1483        for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1484                para = &chip->intf_table->gen2_para[i];
1485                if (!(para->cut_mask & cut))
1486                        continue;
1487                if (para->offset == 0xffff)
1488                        break;
1489                offset = para->offset;
1490                value = para->value;
1491                if (para->ip_sel == RTW_IP_SEL_PHY)
1492                        rtw_mdio_write(rtwdev, offset, value, false);
1493                else
1494                        rtw_dbi_write8(rtwdev, offset, value);
1495        }
1496
1497        rtw_pci_link_cfg(rtwdev);
1498}
1499
1500static int __maybe_unused rtw_pci_suspend(struct device *dev)
1501{
1502        return 0;
1503}
1504
1505static int __maybe_unused rtw_pci_resume(struct device *dev)
1506{
1507        return 0;
1508}
1509
1510SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
1511EXPORT_SYMBOL(rtw_pm_ops);
1512
1513static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1514{
1515        int ret;
1516
1517        ret = pci_enable_device(pdev);
1518        if (ret) {
1519                rtw_err(rtwdev, "failed to enable pci device\n");
1520                return ret;
1521        }
1522
1523        pci_set_master(pdev);
1524        pci_set_drvdata(pdev, rtwdev->hw);
1525        SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1526
1527        return 0;
1528}
1529
1530static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1531{
1532        pci_clear_master(pdev);
1533        pci_disable_device(pdev);
1534}
1535
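/* Map the PCI I/O region so the register accessors work, then allocate the
 * remaining PCI/DMA resources via rtw_pci_init(); the mapping is unwound if
 * that allocation fails.
 */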
1536static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1537{
1538        struct rtw_pci *rtwpci;
1539        int ret;
1540
1541        rtwpci = (struct rtw_pci *)rtwdev->priv;
1542        rtwpci->pdev = pdev;
1543
1544        /* after this, the driver can access the hw registers */
1545        ret = rtw_pci_io_mapping(rtwdev, pdev);
1546        if (ret) {
1547                rtw_err(rtwdev, "failed to request pci io region\n");
1548                goto err_out;
1549        }
1550
1551        ret = rtw_pci_init(rtwdev);
1552        if (ret) {
1553                rtw_err(rtwdev, "failed to allocate pci resources\n");
1554                goto err_io_unmap;
1555        }
1556
1557        return 0;
1558
1559err_io_unmap:
1560        rtw_pci_io_unmapping(rtwdev, pdev);
1561
1562err_out:
1563        return ret;
1564}
1565
1566static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1567{
1568        rtw_pci_deinit(rtwdev);
1569        rtw_pci_io_unmapping(rtwdev, pdev);
1570}
1571
1572static struct rtw_hci_ops rtw_pci_ops = {
1573        .tx_write = rtw_pci_tx_write,
1574        .tx_kick_off = rtw_pci_tx_kick_off,
1575        .flush_queues = rtw_pci_flush_queues,
1576        .setup = rtw_pci_setup,
1577        .start = rtw_pci_start,
1578        .stop = rtw_pci_stop,
1579        .deep_ps = rtw_pci_deep_ps,
1580        .link_ps = rtw_pci_link_ps,
1581        .interface_cfg = rtw_pci_interface_cfg,
1582
1583        .read8 = rtw_pci_read8,
1584        .read16 = rtw_pci_read16,
1585        .read32 = rtw_pci_read32,
1586        .write8 = rtw_pci_write8,
1587        .write16 = rtw_pci_write16,
1588        .write32 = rtw_pci_write32,
1589        .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
1590        .write_data_h2c = rtw_pci_write_data_h2c,
1591};
1592
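/* Allocate exactly one interrupt vector, preferring MSI unless the
 * disable_msi module parameter is set (legacy INTx is always allowed as a
 * fallback), then install the threaded interrupt handler. The vectors are
 * released again if requesting the IRQ fails.
 */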
1593static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1594{
1595        unsigned int flags = PCI_IRQ_LEGACY;
1596        int ret;
1597
1598        if (!rtw_disable_msi)
1599                flags |= PCI_IRQ_MSI;
1600
1601        ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
1602        if (ret < 0) {
1603                rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
1604                return ret;
1605        }
1606
1607        ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1608                                        rtw_pci_interrupt_handler,
1609                                        rtw_pci_interrupt_threadfn,
1610                                        IRQF_SHARED, KBUILD_MODNAME, rtwdev);
1611        if (ret) {
1612                rtw_err(rtwdev, "failed to request irq %d\n", ret);
1613                pci_free_irq_vectors(pdev);
1614        }
1615
1616        return ret;
1617}
1618
1619static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1620{
1621        devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
1622        pci_free_irq_vectors(pdev);
1623}
1624
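/* NAPI poll callback: drain received MPDUs from the RX ring until either the
 * budget is exhausted or the ring is empty. When polling completes under
 * budget, interrupts are re-enabled (only while the device is running) and
 * NAPI is re-armed if the hardware ring filled up again in the meantime; see
 * the race comment below.
 */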
1625static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
1626{
1627        struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
1628        struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
1629                                              priv);
1630        int work_done = 0;
1631
1632        while (work_done < budget) {
1633                u32 work_done_once;
1634
1635                work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
1636                                                 budget - work_done);
1637                if (work_done_once == 0)
1638                        break;
1639                work_done += work_done_once;
1640        }
1641        if (work_done < budget) {
1642                napi_complete_done(napi, work_done);
1643                spin_lock_bh(&rtwpci->irq_lock);
1644                if (rtwpci->running)
1645                        rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
1646                spin_unlock_bh(&rtwpci->irq_lock);
1647                /* If the ISR fires during polling, after the last descriptor
1648                 * was read but before napi_complete_done(), data left on the
1649                 * DMA ring would not be processed immediately. Check whether
1650                 * the DMA ring is empty and perform napi_schedule() accordingly.
1651                 */
1652                if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
1653                        napi_schedule(napi);
1654        }
1655
1656        return work_done;
1657}
1658
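/* NAPI needs a struct net_device to hang off, but this driver has none of
 * its own (mac80211 owns the net devices), so a dummy netdev is initialized
 * purely as a NAPI anchor.
 */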
1659static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
1660{
1661        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1662
1663        init_dummy_netdev(&rtwpci->netdev);
1664        netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll,
1665                       RTW_NAPI_WEIGHT_NUM);
1666}
1667
1668static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
1669{
1670        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1671
1672        rtw_pci_napi_stop(rtwdev);
1673        netif_napi_del(&rtwpci->napi);
1674}
1675
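/* Common PCI probe path, exported for the chip drivers: allocate the
 * ieee80211_hw with room for rtw_dev plus the PCI-private data, set up the
 * core, claim and map the PCI device, initialize NAPI, read the chip
 * information, program the PCIe PHY, register with mac80211 and finally hook
 * up the interrupt. Each failure point unwinds the steps taken before it.
 */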
1676int rtw_pci_probe(struct pci_dev *pdev,
1677                  const struct pci_device_id *id)
1678{
1679        struct ieee80211_hw *hw;
1680        struct rtw_dev *rtwdev;
1681        int drv_data_size;
1682        int ret;
1683
1684        drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
1685        hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
1686        if (!hw) {
1687                dev_err(&pdev->dev, "failed to allocate hw\n");
1688                return -ENOMEM;
1689        }
1690
1691        rtwdev = hw->priv;
1692        rtwdev->hw = hw;
1693        rtwdev->dev = &pdev->dev;
1694        rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
1695        rtwdev->hci.ops = &rtw_pci_ops;
1696        rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
1697
1698        ret = rtw_core_init(rtwdev);
1699        if (ret)
1700                goto err_release_hw;
1701
1702        rtw_dbg(rtwdev, RTW_DBG_PCI,
1703                "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
1704                pdev->vendor, pdev->device, pdev->revision);
1705
1706        ret = rtw_pci_claim(rtwdev, pdev);
1707        if (ret) {
1708                rtw_err(rtwdev, "failed to claim pci device\n");
1709                goto err_deinit_core;
1710        }
1711
1712        ret = rtw_pci_setup_resource(rtwdev, pdev);
1713        if (ret) {
1714                rtw_err(rtwdev, "failed to setup pci resources\n");
1715                goto err_pci_declaim;
1716        }
1717
1718        rtw_pci_napi_init(rtwdev);
1719
1720        ret = rtw_chip_info_setup(rtwdev);
1721        if (ret) {
1722                rtw_err(rtwdev, "failed to setup chip information\n");
1723                goto err_destroy_pci;
1724        }
1725
1726        rtw_pci_phy_cfg(rtwdev);
1727
1728        ret = rtw_register_hw(rtwdev, hw);
1729        if (ret) {
1730                rtw_err(rtwdev, "failed to register hw\n");
1731                goto err_destroy_pci;
1732        }
1733
1734        ret = rtw_pci_request_irq(rtwdev, pdev);
1735        if (ret) {
1736                ieee80211_unregister_hw(hw);
1737                goto err_destroy_pci;
1738        }
1739
1740        return 0;
1741
1742err_destroy_pci:
1743        rtw_pci_napi_deinit(rtwdev);
1744        rtw_pci_destroy(rtwdev, pdev);
1745
1746err_pci_declaim:
1747        rtw_pci_declaim(rtwdev, pdev);
1748
1749err_deinit_core:
1750        rtw_core_deinit(rtwdev);
1751
1752err_release_hw:
1753        ieee80211_free_hw(hw);
1754
1755        return ret;
1756}
1757EXPORT_SYMBOL(rtw_pci_probe);
1758
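/* Tear down in roughly the reverse order of probe: unregister from mac80211
 * first so no new traffic arrives, mask interrupts, remove NAPI, free the
 * DMA resources and the I/O mapping, release the PCI device and the IRQ
 * vectors, and finally free the core and the ieee80211_hw.
 */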
1759void rtw_pci_remove(struct pci_dev *pdev)
1760{
1761        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1762        struct rtw_dev *rtwdev;
1763        struct rtw_pci *rtwpci;
1764
1765        if (!hw)
1766                return;
1767
1768        rtwdev = hw->priv;
1769        rtwpci = (struct rtw_pci *)rtwdev->priv;
1770
1771        rtw_unregister_hw(rtwdev, hw);
1772        rtw_pci_disable_interrupt(rtwdev, rtwpci);
1773        rtw_pci_napi_deinit(rtwdev);
1774        rtw_pci_destroy(rtwdev, pdev);
1775        rtw_pci_declaim(rtwdev, pdev);
1776        rtw_pci_free_irq(rtwdev, pdev);
1777        rtw_core_deinit(rtwdev);
1778        ieee80211_free_hw(hw);
1779}
1780EXPORT_SYMBOL(rtw_pci_remove);
1781
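/* Shutdown hook: let the chip run its own shutdown routine if it provides
 * one, then put the device into D3hot so it stays quiescent (no stray DMA or
 * interrupts) while the system reboots or powers off.
 */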
1782void rtw_pci_shutdown(struct pci_dev *pdev)
1783{
1784        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1785        struct rtw_dev *rtwdev;
1786        struct rtw_chip_info *chip;
1787
1788        if (!hw)
1789                return;
1790
1791        rtwdev = hw->priv;
1792        chip = rtwdev->chip;
1793
1794        if (chip->ops->shutdown)
1795                chip->ops->shutdown(rtwdev);
1796
1797        pci_set_power_state(pdev, PCI_D3hot);
1798}
1799EXPORT_SYMBOL(rtw_pci_shutdown);
1800
1801MODULE_AUTHOR("Realtek Corporation");
1802MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
1803MODULE_LICENSE("Dual BSD/GPL");
1804