linux/drivers/net/wireless/realtek/rtw88/pci.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static bool rtw_disable_msi;
static bool rtw_pci_disable_aspm;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");

static u32 rtw_pci_tx_queue_idx_addr[] = {
        [RTW_TX_QUEUE_BK]       = RTK_PCI_TXBD_IDX_BKQ,
        [RTW_TX_QUEUE_BE]       = RTK_PCI_TXBD_IDX_BEQ,
        [RTW_TX_QUEUE_VI]       = RTK_PCI_TXBD_IDX_VIQ,
        [RTW_TX_QUEUE_VO]       = RTK_PCI_TXBD_IDX_VOQ,
        [RTW_TX_QUEUE_MGMT]     = RTK_PCI_TXBD_IDX_MGMTQ,
        [RTW_TX_QUEUE_HI0]      = RTK_PCI_TXBD_IDX_HI0Q,
        [RTW_TX_QUEUE_H2C]      = RTK_PCI_TXBD_IDX_H2CQ,
};

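/* Map a TX queue to the qsel field of the TX descriptor; data queues
 * use the skb priority directly.
 */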
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
        switch (queue) {
        case RTW_TX_QUEUE_BCN:
                return TX_DESC_QSEL_BEACON;
        case RTW_TX_QUEUE_H2C:
                return TX_DESC_QSEL_H2C;
        case RTW_TX_QUEUE_MGMT:
                return TX_DESC_QSEL_MGMT;
        case RTW_TX_QUEUE_HI0:
                return TX_DESC_QSEL_HIGH;
        default:
                return skb->priority;
        }
}

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writel(val, rtwpci->mmap + addr);
}

static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
        int offset = tx_ring->r.desc_size * idx;

        return tx_ring->r.head + offset;
}

static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
                                      struct rtw_pci_tx_ring *tx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb, *tmp;
        dma_addr_t dma;

        /* free every skb remaining in the tx list */
        skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
                __skb_unlink(skb, &tx_ring->queue);
                tx_data = rtw_pci_get_tx_data(skb);
                dma = tx_data->dma;

                dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_tx_ring *tx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = tx_ring->r.head;
        u32 len = tx_ring->r.len;
        int ring_sz = len * tx_ring->r.desc_size;

        rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

        /* free the ring itself */
        dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
        tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
                                      struct rtw_pci_rx_ring *rx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        dma_addr_t dma;
        int i;

        for (i = 0; i < rx_ring->r.len; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;

                dma = *((dma_addr_t *)skb->cb);
                dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
                rx_ring->buf[i] = NULL;
        }
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_rx_ring *rx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = rx_ring->r.head;
        int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

        rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

        dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        int i;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
                rx_ring = &rtwpci->rx_rings[i];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }
}

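/* Allocate a coherent DMA buffer holding 'len' TX buffer descriptors and
 * reset the ring state; 'len' must fit in TRX_BD_IDX_MASK because the
 * hardware index registers only hold that many bits.
 */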
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_tx_ring *tx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        int ring_sz = desc_size * len;
        dma_addr_t dma;
        u8 *head;

        if (len > TRX_BD_IDX_MASK) {
                rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
                return -EINVAL;
        }

        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate tx ring\n");
                return -ENOMEM;
        }

        skb_queue_head_init(&tx_ring->queue);
        tx_ring->r.head = head;
        tx_ring->r.dma = dma;
        tx_ring->r.len = len;
        tx_ring->r.desc_size = desc_size;
        tx_ring->r.wp = 0;
        tx_ring->r.rp = 0;

        return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
                                 struct rtw_pci_rx_ring *rx_ring,
                                 u32 idx, u32 desc_sz)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        dma_addr_t dma;

        if (!skb)
                return -EINVAL;

        dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma))
                return -EBUSY;

        *((dma_addr_t *)skb->cb) = dma;
        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);

        return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
                                        struct rtw_pci_rx_ring *rx_ring,
                                        u32 idx, u32 desc_sz)
{
        struct device *dev = rtwdev->dev;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;

        dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);
}

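/* Allocate the RX buffer descriptor ring and pre-map one skb of
 * RTK_PCI_RX_BUF_SIZE bytes per entry; on failure, unmap and free every
 * skb allocated so far before releasing the ring.
 */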
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_rx_ring *rx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb = NULL;
        dma_addr_t dma;
        u8 *head;
        int ring_sz = desc_size * len;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        int i, allocated;
        int ret = 0;

        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate rx ring\n");
                return -ENOMEM;
        }
        rx_ring->r.head = head;

        for (i = 0; i < len; i++) {
                skb = dev_alloc_skb(buf_sz);
                if (!skb) {
                        allocated = i;
                        ret = -ENOMEM;
                        goto err_out;
                }

                memset(skb->data, 0, buf_sz);
                rx_ring->buf[i] = skb;
                ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
                if (ret) {
                        allocated = i;
                        dev_kfree_skb_any(skb);
                        goto err_out;
                }
        }

        rx_ring->r.dma = dma;
        rx_ring->r.len = len;
        rx_ring->r.desc_size = desc_size;
        rx_ring->r.wp = 0;
        rx_ring->r.rp = 0;

        return 0;

err_out:
        for (i = 0; i < allocated; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;
                dma = *((dma_addr_t *)skb->cb);
                dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                rx_ring->buf[i] = NULL;
        }
        dma_free_coherent(&pdev->dev, ring_sz, head, dma);

        rtw_err(rtwdev, "failed to init rx buffer\n");

        return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        struct rtw_chip_info *chip = rtwdev->chip;
        int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
        int tx_desc_size, rx_desc_size;
        u32 len;
        int ret;

        tx_desc_size = chip->tx_buf_desc_sz;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                len = max_num_of_tx_queue(i);
                ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
                if (ret)
                        goto out;
        }

        rx_desc_size = chip->rx_buf_desc_sz;

        for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
                                           RTK_MAX_RX_DESC_NUM);
                if (ret)
                        goto out;
        }

        return 0;

out:
        tx_alloced = i;
        for (i = 0; i < tx_alloced; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        rx_alloced = j;
        for (j = 0; j < rx_alloced; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }

        return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
        rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        int ret = 0;

        rtwpci->irq_mask[0] = IMR_HIGHDOK |
                              IMR_MGNTDOK |
                              IMR_BKDOK |
                              IMR_BEDOK |
                              IMR_VIDOK |
                              IMR_VODOK |
                              IMR_ROK |
                              IMR_BCNDMAINT_E |
                              IMR_C2HCMD |
                              0;
        rtwpci->irq_mask[1] = IMR_TXFOVW |
                              0;
        rtwpci->irq_mask[3] = IMR_H2CDOK |
                              0;
        spin_lock_init(&rtwpci->irq_lock);
        spin_lock_init(&rtwpci->hwirq_lock);
        ret = rtw_pci_init_trx_ring(rtwdev);

        return ret;
}

static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u32 len;
        u8 tmp;
        dma_addr_t dma;

        tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
        rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

        if (!rtw_chip_wcpu_11n(rtwdev)) {
                len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
                dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
                rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
                rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
                rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
                rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
        }

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

        len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
        dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

        /* reset read/write point */
        rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

        /* reset H2C Queue index in a single write */
        if (rtw_chip_wcpu_11ac(rtwdev))
                rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
                                BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
        rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
                                     struct rtw_pci *rtwpci, bool exclude_rx)
{
        unsigned long flags;
        u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;

        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

        rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
        if (rtw_chip_wcpu_11ac(rtwdev))
                rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

        rtwpci->irq_enabled = true;

        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
                                      struct rtw_pci *rtwpci)
{
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

        if (!rtwpci->irq_enabled)
                goto out;

        rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
        if (rtw_chip_wcpu_11ac(rtwdev))
                rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

        rtwpci->irq_enabled = false;

out:
        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
        /* reset dma and rx tag */
        rtw_write32_set(rtwdev, RTK_PCI_CTRL,
                        BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
        rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtw_pci_reset_trx_ring(rtwdev);
        rtw_pci_dma_reset(rtwdev, rtwpci);

        return 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
        struct rtw_pci_tx_ring *tx_ring;
        u8 queue;

        rtw_pci_reset_trx_ring(rtwdev);
        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
                tx_ring = &rtwpci->tx_rings[queue];
                rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
        }
}

static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
                return;

        napi_enable(&rtwpci->napi);
}

static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
                return;

        napi_synchronize(&rtwpci->napi);
        napi_disable(&rtwpci->napi);
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtw_pci_napi_start(rtwdev);

        spin_lock_bh(&rtwpci->irq_lock);
        rtwpci->running = true;
        rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
        spin_unlock_bh(&rtwpci->irq_lock);

        return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;

        spin_lock_bh(&rtwpci->irq_lock);
        rtwpci->running = false;
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
        spin_unlock_bh(&rtwpci->irq_lock);

        synchronize_irq(pdev->irq);
        rtw_pci_napi_stop(rtwdev);

        spin_lock_bh(&rtwpci->irq_lock);
        rtw_pci_dma_release(rtwdev, rtwpci);
        spin_unlock_bh(&rtwpci->irq_lock);
}

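/* Enter deep power save only when no TX DMA is pending on the data
 * queues; called with irq_lock held.
 */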
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        bool tx_empty = true;
        u8 queue;

        lockdep_assert_held(&rtwpci->irq_lock);

        /* TX DMA is not allowed in the deep PS state */
        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
                /* the BCN queue holds the rsvd page and has no DMA interrupt,
                 * and the H2C queue is managed by firmware
                 */
                if (queue == RTW_TX_QUEUE_BCN ||
                    queue == RTW_TX_QUEUE_H2C)
                        continue;

                tx_ring = &rtwpci->tx_rings[queue];

                /* check if there is any skb DMAing */
                if (skb_queue_len(&tx_ring->queue)) {
                        tx_empty = false;
                        break;
                }
        }

        if (!tx_empty) {
                rtw_dbg(rtwdev, RTW_DBG_PS,
                        "TX path not empty, cannot enter deep power save state\n");
                return;
        }

        set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
        rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        lockdep_assert_held(&rtwpci->irq_lock);

        if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        spin_lock_bh(&rtwpci->irq_lock);

        if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_pci_deep_ps_enter(rtwdev);

        if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_pci_deep_ps_leave(rtwdev);

        spin_unlock_bh(&rtwpci->irq_lock);
}

static u8 ac_to_hwq[] = {
        [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
        [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
        [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
        [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);

static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        __le16 fc = hdr->frame_control;
        u8 q_mapping = skb_get_queue_mapping(skb);
        u8 queue;

        if (unlikely(ieee80211_is_beacon(fc)))
                queue = RTW_TX_QUEUE_BCN;
        else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
                queue = RTW_TX_QUEUE_MGMT;
        else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
                queue = ac_to_hwq[IEEE80211_AC_BE];
        else
                queue = ac_to_hwq[q_mapping];

        return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
                                      struct rtw_pci_tx_ring *ring)
{
        struct sk_buff *prev = skb_dequeue(&ring->queue);
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;

        if (!prev)
                return;

        tx_data = rtw_pci_get_tx_data(prev);
        dma = tx_data->dma;
        dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
        dev_kfree_skb_any(prev);
}

static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
                              struct rtw_pci_rx_ring *rx_ring,
                              u32 idx)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        u32 desc_sz = chip->rx_buf_desc_sz;
        u16 total_pkt_size;

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

        /* rx tag mismatch, throw a warning */
        if (total_pkt_size != rtwpci->rx_tag)
                rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

        rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
{
        u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
        u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);

        return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
}

static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
        u32 cur_rp;
        u8 i;

        /* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a
         * bit dynamic, it's hard to define a reasonable fixed total timeout
         * for the read_poll_timeout* helpers. Instead, we can bound the
         * number of polls, so we just use a for loop with udelay here.
         */
        for (i = 0; i < 30; i++) {
                cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
                if (cur_rp == ring->r.wp)
                        return;

                udelay(1);
        }

        if (!drop)
                rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q);
}

static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
                                   bool drop)
{
        u8 q;

        for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
                /* It may not be necessary to flush the BCN and H2C tx queues. */
                if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C)
                        continue;

                if (pci_queues & BIT(q))
                        __pci_flush_queue(rtwdev, q, drop);
        }
}

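/* Translate the mac80211 queue bitmap into a PCI TX queue bitmap via
 * ac_to_hwq, then flush the selected queues.
 */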
static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
        u32 pci_queues = 0;
        u8 i;

        /* If all of the hardware queues are requested to flush,
         * flush all of the pci queues.
         */
        if (queues == BIT(rtwdev->hw->queues) - 1) {
                pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
        } else {
                for (i = 0; i < rtwdev->hw->queues; i++)
                        if (queues & BIT(i))
                                pci_queues |= BIT(ac_to_hwq[i]);
        }

        __rtw_pci_flush_queues(rtwdev, pci_queues, drop);
}

static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring;
        u32 bd_idx;

        ring = &rtwpci->tx_rings[queue];
        bd_idx = rtw_pci_tx_queue_idx_addr[queue];

        spin_lock_bh(&rtwpci->irq_lock);
        rtw_pci_deep_ps_leave(rtwdev);
        rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
        spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u8 queue;

        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
                if (test_and_clear_bit(queue, rtwpci->tx_queued))
                        rtw_pci_tx_kick_off_queue(rtwdev, queue);
}

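/* Write one skb into the TX ring: buf_desc[0] covers the tx_pkt_desc_sz
 * descriptor pushed in front of the frame, and buf_desc[1] covers the
 * frame body right behind it.
 */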
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
                                 struct rtw_tx_pkt_info *pkt_info,
                                 struct sk_buff *skb, u8 queue)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;
        u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
        u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
        u32 size;
        u32 psb_len;
        u8 *pkt_desc;
        struct rtw_pci_tx_buffer_desc *buf_desc;

        ring = &rtwpci->tx_rings[queue];

        size = skb->len;

        if (queue == RTW_TX_QUEUE_BCN)
                rtw_pci_release_rsvd_page(rtwpci, ring);
        else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
                return -ENOSPC;

        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
        memset(pkt_desc, 0, tx_pkt_desc_sz);
        pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
        rtw_tx_fill_tx_desc(pkt_info, skb);
        dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
                             DMA_TO_DEVICE);
        if (dma_mapping_error(&rtwpci->pdev->dev, dma))
                return -EBUSY;

        /* after this point the skb is DMA mapped, there is no way back */
        buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
        memset(buf_desc, 0, tx_buf_desc_sz);
        psb_len = (skb->len - 1) / 128 + 1;
        if (queue == RTW_TX_QUEUE_BCN)
                psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

        buf_desc[0].psb_len = cpu_to_le16(psb_len);
        buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
        buf_desc[0].dma = cpu_to_le32(dma);
        buf_desc[1].buf_size = cpu_to_le16(size);
        buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

        tx_data = rtw_pci_get_tx_data(skb);
        tx_data->dma = dma;
        tx_data->sn = pkt_info->sn;

        spin_lock_bh(&rtwpci->irq_lock);

        skb_queue_tail(&ring->queue, skb);

        if (queue == RTW_TX_QUEUE_BCN)
                goto out_unlock;

        /* update write-index, and kick it off later */
        set_bit(queue, rtwpci->tx_queued);
        if (++ring->r.wp >= ring->r.len)
                ring->r.wp = 0;

out_unlock:
        spin_unlock_bh(&rtwpci->irq_lock);

        return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
                                        u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info = {0};
        u8 reg_bcn_work;
        int ret;

        skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
        if (!skb)
                return -ENOMEM;

        ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
        if (ret) {
                rtw_err(rtwdev, "failed to write rsvd page data\n");
                return ret;
        }

        /* reserved pages go through beacon queue */
        reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
        reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
        rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);

        return 0;
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info = {0};
        int ret;

        skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
        if (!skb)
                return -ENOMEM;

        ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
        if (ret) {
                rtw_err(rtwdev, "failed to write h2c data\n");
                return ret;
        }

        rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);

        return 0;
}

static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
                            struct rtw_tx_pkt_info *pkt_info,
                            struct sk_buff *skb)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring;
        u8 queue = rtw_hw_queue_mapping(skb);
        int ret;

        ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
        if (ret)
                return ret;

        ring = &rtwpci->tx_rings[queue];
        spin_lock_bh(&rtwpci->irq_lock);
        if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
                ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
                ring->queue_stopped = true;
        }
        spin_unlock_bh(&rtwpci->irq_lock);

        return 0;
}

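/* Reclaim completed TX skbs up to the hardware read pointer: wake a
 * stopped queue once enough descriptors are free, then either queue the
 * skb for a TX report or return its status to mac80211.
 */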
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue)
{
        struct ieee80211_hw *hw = rtwdev->hw;
        struct ieee80211_tx_info *info;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb;
        u32 count;
        u32 bd_idx_addr;
        u32 bd_idx, cur_rp, rp_idx;
        u16 q_map;

        ring = &rtwpci->tx_rings[hw_queue];

        bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
        bd_idx = rtw_read32(rtwdev, bd_idx_addr);
        cur_rp = bd_idx >> 16;
        cur_rp &= TRX_BD_IDX_MASK;
        rp_idx = ring->r.rp;
        if (cur_rp >= ring->r.rp)
                count = cur_rp - ring->r.rp;
        else
                count = ring->r.len - (ring->r.rp - cur_rp);

        while (count--) {
                skb = skb_dequeue(&ring->queue);
                if (!skb) {
                        rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
                                count, hw_queue, bd_idx, ring->r.rp, cur_rp);
                        break;
                }
                tx_data = rtw_pci_get_tx_data(skb);
                dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
                                 DMA_TO_DEVICE);

                /* just free command packets from host to card */
                if (hw_queue == RTW_TX_QUEUE_H2C) {
                        dev_kfree_skb_irq(skb);
                        continue;
                }

                if (ring->queue_stopped &&
                    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
                        q_map = skb_get_queue_mapping(skb);
                        ieee80211_wake_queue(hw, q_map);
                        ring->queue_stopped = false;
                }

                if (++rp_idx >= ring->r.len)
                        rp_idx = 0;

                skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

                info = IEEE80211_SKB_CB(skb);

                /* enqueue to wait for tx report */
                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
                        rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
                        continue;
                }

                /* always ACK for others, so they won't be marked as dropped */
                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
                else
                        info->flags |= IEEE80211_TX_STAT_ACK;

                ieee80211_tx_info_clear_status(info);
                ieee80211_tx_status_irqsafe(hw, skb);
        }

        ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct napi_struct *napi = &rtwpci->napi;

        napi_schedule(napi);
}

static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
                                     struct rtw_pci *rtwpci)
{
        struct rtw_pci_rx_ring *ring;
        int count = 0;
        u32 tmp, cur_wp;

        ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
        tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
        cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
        if (cur_wp >= ring->r.wp)
                count = cur_wp - ring->r.wp;
        else
                count = ring->r.len - (ring->r.wp - cur_wp);

        return count;
}

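/* NAPI poll body: for each completed RX descriptor, copy the frame into
 * a freshly allocated skb, deliver it (C2H or mac80211), then hand the
 * original buffer back to the device.
 */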
static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue, u32 limit)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct napi_struct *napi = &rtwpci->napi;
        struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
        struct rtw_rx_pkt_stat pkt_stat;
        struct ieee80211_rx_status rx_status;
        struct sk_buff *skb, *new;
        u32 cur_rp = ring->r.rp;
        u32 count, rx_done = 0;
        u32 pkt_offset;
        u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
        u32 buf_desc_sz = chip->rx_buf_desc_sz;
        u32 new_len;
        u8 *rx_desc;
        dma_addr_t dma;

        count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
        count = min(count, limit);

        while (count--) {
                rtw_pci_dma_check(rtwdev, ring, cur_rp);
                skb = ring->buf[cur_rp];
                dma = *((dma_addr_t *)skb->cb);
                dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
                                        DMA_FROM_DEVICE);
                rx_desc = skb->data;
                chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

                /* offset from rx_desc to payload */
                pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
                             pkt_stat.shift;

                /* allocate a new skb for this frame,
                 * discard the frame if none available
                 */
                new_len = pkt_stat.pkt_len + pkt_offset;
                new = dev_alloc_skb(new_len);
                if (WARN_ONCE(!new, "rx routine starvation\n"))
                        goto next_rp;

                /* put the DMA data including rx_desc from phy to new skb */
                skb_put_data(new, skb->data, new_len);

                if (pkt_stat.is_c2h) {
                        rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
                } else {
                        /* remove rx_desc */
                        skb_pull(new, pkt_offset);

                        rtw_rx_stats(rtwdev, pkt_stat.vif, new);
                        memcpy(new->cb, &rx_status, sizeof(rx_status));
                        ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
                        rx_done++;
                }

next_rp:
                /* new skb delivered to mac80211, re-enable original skb DMA */
                rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
                                            buf_desc_sz);

                /* host read next element in ring */
                if (++cur_rp >= ring->r.len)
                        cur_rp = 0;
        }

        ring->r.rp = cur_rp;
        /* 'rp', the last position we have read, is seen as the previous
         * position of 'wp', which is used to calculate 'count' next time.
         */
        ring->r.wp = cur_rp;
        rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);

        return rx_done;
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
                                   struct rtw_pci *rtwpci, u32 *irq_status)
{
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

        irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
        irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
        if (rtw_chip_wcpu_11ac(rtwdev))
                irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
        else
                irq_status[3] = 0;
        irq_status[0] &= rtwpci->irq_mask[0];
        irq_status[1] &= rtwpci->irq_mask[1];
        irq_status[3] &= rtwpci->irq_mask[3];
        rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
        rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
        if (rtw_chip_wcpu_11ac(rtwdev))
                rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

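/* Hard IRQ handler: only masks the device interrupts; the actual work
 * is done in rtw_pci_interrupt_threadfn() below.
 */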
static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
        struct rtw_dev *rtwdev = dev;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        /* Disable RTW PCI interrupts to avoid more interrupts arriving before
         * the end of the thread function.
         *
         * Disable HIMR here to also avoid a new HISR flag being raised before
         * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
         * are cleared, the edge-triggered interrupt will not be generated when
         * a new HISR flag is set.
         */
        rtw_pci_disable_interrupt(rtwdev, rtwpci);

        return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
        struct rtw_dev *rtwdev = dev;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u32 irq_status[4];
        bool rx = false;

        spin_lock_bh(&rtwpci->irq_lock);
        rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

        if (irq_status[0] & IMR_MGNTDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
        if (irq_status[0] & IMR_HIGHDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
        if (irq_status[0] & IMR_BEDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
        if (irq_status[0] & IMR_BKDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
        if (irq_status[0] & IMR_VODOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
        if (irq_status[0] & IMR_VIDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
        if (irq_status[3] & IMR_H2CDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
        if (irq_status[0] & IMR_ROK) {
                rtw_pci_rx_isr(rtwdev);
                rx = true;
        }
        if (unlikely(irq_status[0] & IMR_C2HCMD))
                rtw_fw_c2h_cmd_isr(rtwdev);

        /* all of the jobs for this interrupt have been done */
        if (rtwpci->running)
                rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
        spin_unlock_bh(&rtwpci->irq_lock);

        return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
                              struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long len;
        u8 bar_id = 2;
        int ret;

        ret = pci_request_regions(pdev, KBUILD_MODNAME);
        if (ret) {
                rtw_err(rtwdev, "failed to request pci regions\n");
                return ret;
        }

        len = pci_resource_len(pdev, bar_id);
        rtwpci->mmap = pci_iomap(pdev, bar_id, len);
        if (!rtwpci->mmap) {
                pci_release_regions(pdev);
                rtw_err(rtwdev, "failed to map pci memory\n");
                return -ENOMEM;
        }

        return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
                                 struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (rtwpci->mmap) {
                pci_iounmap(pdev, rtwpci->mmap);
                pci_release_regions(pdev);
        }
}

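/* DBI accessors: write the data and target address, raise the write (or
 * read) flag, then poll up to RTW_PCI_WR_RETRY_CNT times for the
 * hardware to clear it.
 */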
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
        u16 write_addr;
        u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
        u8 flag;
        u8 cnt;

        write_addr = addr & BITS_DBI_ADDR_MASK;
        write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
        rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
        rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
                if (flag == 0)
                        return;

                udelay(10);
        }

        WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
        u16 read_addr = addr & BITS_DBI_ADDR_MASK;
        u8 flag;
        u8 cnt;

        rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
                if (flag == 0) {
                        read_addr = REG_DBI_RDATA_V1 + (addr & 3);
                        *value = rtw_read8(rtwdev, read_addr);
                        return 0;
                }

                udelay(10);
        }

        WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
        return -EIO;
}

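/* MDIO write to the PCIe PHY: the register page is selected from the
 * address and from whether the gen1 (g1) or gen2 parameter table is in
 * use.
 */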
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
        u8 page;
        u8 wflag;
        u8 cnt;

        rtw_write16(rtwdev, REG_MDIO_V1, data);

        page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
        page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
        rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
                                        BIT_MDIO_WFLAG_V1);
                if (wflag == 0)
                        return;

                udelay(10);
        }

        WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
        u8 value;
        int ret;

        if (rtw_pci_disable_aspm)
                return;

        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
        if (ret) {
                rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
                return;
        }

        if (enable)
                value |= BIT_CLKREQ_SW_EN;
        else
                value &= ~BIT_CLKREQ_SW_EN;

        rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
{
        u8 value;
        int ret;

        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
        if (ret) {
                rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
                return;
        }

        if (enable)
                value &= ~BIT_CLKREQ_N_PAD;
        else
                value |= BIT_CLKREQ_N_PAD;

        rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
        u8 value;
        int ret;

        if (rtw_pci_disable_aspm)
                return;

        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
        if (ret) {
                rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
                return;
        }

        if (enable)
                value |= BIT_L1_SW_EN;
        else
                value &= ~BIT_L1_SW_EN;

        rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        /* Like CLKREQ, ASPM is also implemented by two HW modules, and can
         * only be enabled when the host supports it.
         *
         * The ASPM mechanism should only be enabled when driver/firmware
         * enters power save mode and there is no heavy traffic, because we
         * have experienced interoperability issues where the link enters L1
         * on the fly even while the driver is sustaining high throughput.
         * This is probably because ASPM behavior varies slightly across SOCs.
         */
        if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
                rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;
        u16 link_ctrl;
        int ret;

        /* RTL8822CE has REFCLK auto calibration enabled, so it does not
         * need added clock delay to cover the REFCLK timing gap.
         */
        if (chip->id == RTW_CHIP_TYPE_8822C)
                rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

        /* Though the standard PCIE configuration space can set the link
         * control register, by Realtek's design the driver should check
         * whether the host supports CLKREQ/ASPM before enabling the HW
         * module.
         *
         * These functions are implemented by two associated HW modules:
         * one is responsible for accessing the PCIE configuration space to
         * follow the host settings, and the other is in charge of the
         * CLKREQ/ASPM mechanisms and is disabled by default, because the
         * host may not support them, and wrong settings (e.g. CLKREQ# not
         * bi-directional) could lead to device loss if the HW misbehaves
         * on the link.
         *
         * Hence the driver should first check that the PCIE configuration
         * space is synced and enabled, and only then turn on the module
         * that actually implements the mechanism.
         */
1446        ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
1447        if (ret) {
1448                rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
1449                return;
1450        }
1451
1452        if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
1453                rtw_pci_clkreq_set(rtwdev, true);
1454
1455        rtwpci->link_ctrl = link_ctrl;
1456}
1457
1458static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
1459{
1460        struct rtw_chip_info *chip = rtwdev->chip;
1461
1462        switch (chip->id) {
1463        case RTW_CHIP_TYPE_8822C:
1464                if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1465                        rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
1466                                         BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1467                break;
1468        default:
1469                break;
1470        }
1471}
1472
1473static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1474{
1475        struct rtw_chip_info *chip = rtwdev->chip;
1476        const struct rtw_intf_phy_para *para;
1477        u16 cut;
1478        u16 value;
1479        u16 offset;
1480        int i;
1481
1482        cut = BIT(0) << rtwdev->hal.cut_version;
1483
1484        for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1485                para = &chip->intf_table->gen1_para[i];
1486                if (!(para->cut_mask & cut))
1487                        continue;
1488                if (para->offset == 0xffff)
1489                        break;
1490                offset = para->offset;
1491                value = para->value;
1492                if (para->ip_sel == RTW_IP_SEL_PHY)
1493                        rtw_mdio_write(rtwdev, offset, value, true);
1494                else
1495                        rtw_dbi_write8(rtwdev, offset, value);
1496        }
1497
1498        for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1499                para = &chip->intf_table->gen2_para[i];
1500                if (!(para->cut_mask & cut))
1501                        continue;
1502                if (para->offset == 0xffff)
1503                        break;
1504                offset = para->offset;
1505                value = para->value;
1506                if (para->ip_sel == RTW_IP_SEL_PHY)
1507                        rtw_mdio_write(rtwdev, offset, value, false);
1508                else
1509                        rtw_dbi_write8(rtwdev, offset, value);
1510        }
1511
1512        rtw_pci_link_cfg(rtwdev);
1513}
1514
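/* On 8822C with RFE option 6 the CLKREQ# pad is forced low across
 * suspend and released again on resume; other chips need nothing from
 * these PM hooks.
 */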
1515static int __maybe_unused rtw_pci_suspend(struct device *dev)
1516{
1517        struct ieee80211_hw *hw = dev_get_drvdata(dev);
1518        struct rtw_dev *rtwdev = hw->priv;
1519        struct rtw_chip_info *chip = rtwdev->chip;
1520        struct rtw_efuse *efuse = &rtwdev->efuse;
1521
1522        if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1523                rtw_pci_clkreq_pad_low(rtwdev, true);
1524        return 0;
1525}
1526
1527static int __maybe_unused rtw_pci_resume(struct device *dev)
1528{
1529        struct ieee80211_hw *hw = dev_get_drvdata(dev);
1530        struct rtw_dev *rtwdev = hw->priv;
1531        struct rtw_chip_info *chip = rtwdev->chip;
1532        struct rtw_efuse *efuse = &rtwdev->efuse;
1533
1534        if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1535                rtw_pci_clkreq_pad_low(rtwdev, false);
1536        return 0;
1537}
1538
1539SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
1540EXPORT_SYMBOL(rtw_pm_ops);
1541
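/* Enable the PCI device, make it bus master, and tie the ieee80211_hw
 * to the PCI device so it can be retrieved from drvdata later.
 */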
1542static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1543{
1544        int ret;
1545
1546        ret = pci_enable_device(pdev);
1547        if (ret) {
1548                rtw_err(rtwdev, "failed to enable pci device\n");
1549                return ret;
1550        }
1551
1552        pci_set_master(pdev);
1553        pci_set_drvdata(pdev, rtwdev->hw);
1554        SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1555
1556        return 0;
1557}
1558
1559static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1560{
1561        pci_clear_master(pdev);
1562        pci_disable_device(pdev);
1563}
1564
1565static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1566{
1567        struct rtw_pci *rtwpci;
1568        int ret;
1569
1570        rtwpci = (struct rtw_pci *)rtwdev->priv;
1571        rtwpci->pdev = pdev;
1572
1573        /* after this point the driver can access the hw registers */
1574        ret = rtw_pci_io_mapping(rtwdev, pdev);
1575        if (ret) {
1576                rtw_err(rtwdev, "failed to request pci io region\n");
1577                goto err_out;
1578        }
1579
1580        ret = rtw_pci_init(rtwdev);
1581        if (ret) {
1582                rtw_err(rtwdev, "failed to allocate pci resources\n");
1583                goto err_io_unmap;
1584        }
1585
1586        return 0;
1587
1588err_io_unmap:
1589        rtw_pci_io_unmapping(rtwdev, pdev);
1590
1591err_out:
1592        return ret;
1593}
1594
1595static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1596{
1597        rtw_pci_deinit(rtwdev);
1598        rtw_pci_io_unmapping(rtwdev, pdev);
1599}
1600
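/* HCI callbacks through which the rtw88 core drives the PCIe bus */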
1601static struct rtw_hci_ops rtw_pci_ops = {
1602        .tx_write = rtw_pci_tx_write,
1603        .tx_kick_off = rtw_pci_tx_kick_off,
1604        .flush_queues = rtw_pci_flush_queues,
1605        .setup = rtw_pci_setup,
1606        .start = rtw_pci_start,
1607        .stop = rtw_pci_stop,
1608        .deep_ps = rtw_pci_deep_ps,
1609        .link_ps = rtw_pci_link_ps,
1610        .interface_cfg = rtw_pci_interface_cfg,
1611
1612        .read8 = rtw_pci_read8,
1613        .read16 = rtw_pci_read16,
1614        .read32 = rtw_pci_read32,
1615        .write8 = rtw_pci_write8,
1616        .write16 = rtw_pci_write16,
1617        .write32 = rtw_pci_write32,
1618        .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
1619        .write_data_h2c = rtw_pci_write_data_h2c,
1620};
1621
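/* Allocate exactly one IRQ vector, preferring MSI unless it was
 * disabled via module parameter or DMI quirk (legacy INTx always
 * remains as a fallback), then install a threaded interrupt handler.
 */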
1622static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1623{
1624        unsigned int flags = PCI_IRQ_LEGACY;
1625        int ret;
1626
1627        if (!rtw_disable_msi)
1628                flags |= PCI_IRQ_MSI;
1629
1630        ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
1631        if (ret < 0) {
1632                rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
1633                return ret;
1634        }
1635
1636        ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1637                                        rtw_pci_interrupt_handler,
1638                                        rtw_pci_interrupt_threadfn,
1639                                        IRQF_SHARED, KBUILD_MODNAME, rtwdev);
1640        if (ret) {
1641                rtw_err(rtwdev, "failed to request irq %d\n", ret);
1642                pci_free_irq_vectors(pdev);
1643        }
1644
1645        return ret;
1646}
1647
1648static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1649{
1650        devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
1651        pci_free_irq_vectors(pdev);
1652}
1653
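/* NAPI poll: drain received MPDUs until either the budget is exhausted
 * or the RX ring runs dry. Only when finishing under budget is NAPI
 * completed and the interrupt re-enabled (under irq_lock, and only
 * while the interface is still running).
 */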
1654static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
1655{
1656        struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
1657        struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
1658                                              priv);
1659        int work_done = 0;
1660
1661        while (work_done < budget) {
1662                u32 work_done_once;
1663
1664                work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
1665                                                 budget - work_done);
1666                if (work_done_once == 0)
1667                        break;
1668                work_done += work_done_once;
1669        }
1670        if (work_done < budget) {
1671                napi_complete_done(napi, work_done);
1672                spin_lock_bh(&rtwpci->irq_lock);
1673                if (rtwpci->running)
1674                        rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
1675                spin_unlock_bh(&rtwpci->irq_lock);
1676                /* If the ISR fires during polling, before napi_complete,
1677                 * and no further data arrives, data left on the DMA ring
1678                 * would not be processed immediately. Check whether the
1679                 * DMA ring is non-empty and reschedule NAPI accordingly.
1680                 */
1681                if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
1682                        napi_schedule(napi);
1683        }
1684
1685        return work_done;
1686}
1687
1688static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
1689{
1690        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1691
1692        init_dummy_netdev(&rtwpci->netdev);
1693        netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll,
1694                       RTW_NAPI_WEIGHT_NUM);
1695}
1696
1697static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
1698{
1699        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1700
1701        rtw_pci_napi_stop(rtwdev);
1702        netif_napi_del(&rtwpci->napi);
1703}
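/* Some platforms are known to misbehave with MSI and/or ASPM enabled;
 * match them by DMI and flip the corresponding module parameters before
 * the PHY is configured and the IRQ is requested. A hypothetical entry
 * disabling both capabilities at once would use:
 *
 *	.driver_data = (void *)(BIT(QUIRK_DIS_PCI_CAP_MSI) |
 *				BIT(QUIRK_DIS_PCI_CAP_ASPM)),
 */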
1704
1705enum rtw88_quirk_dis_pci_caps {
1706        QUIRK_DIS_PCI_CAP_MSI,
1707        QUIRK_DIS_PCI_CAP_ASPM,
1708};
1709
1710static int disable_pci_caps(const struct dmi_system_id *dmi)
1711{
1712        uintptr_t dis_caps = (uintptr_t)dmi->driver_data;
1713
1714        if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_MSI))
1715                rtw_disable_msi = true;
1716        if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_ASPM))
1717                rtw_pci_disable_aspm = true;
1718
1719        return 1;
1720}
1721
1722static const struct dmi_system_id rtw88_pci_quirks[] = {
1723        {
1724                .callback = disable_pci_caps,
1725                .ident = "Protempo Ltd L116HTN6SPW",
1726                .matches = {
1727                        DMI_MATCH(DMI_SYS_VENDOR, "Protempo Ltd"),
1728                        DMI_MATCH(DMI_PRODUCT_NAME, "L116HTN6SPW"),
1729                },
1730                .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
1731        },
1732        {
1733                .callback = disable_pci_caps,
1734                .ident = "HP HP Pavilion Laptop 14-ce0xxx",
1735                .matches = {
1736                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1737                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Laptop 14-ce0xxx"),
1738                },
1739                .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
1740        },
1741        {}
1742};
1743
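/* Probe: allocate the ieee80211_hw with rtw_dev plus rtw_pci as driver
 * private data, initialize the core, claim the PCI device, map I/O and
 * set up DMA rings, add NAPI, read the chip information, apply DMI
 * quirks, configure the PHY, register with mac80211, and finally hook
 * up the interrupt. The error paths unwind these steps in reverse.
 */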
1744int rtw_pci_probe(struct pci_dev *pdev,
1745                  const struct pci_device_id *id)
1746{
1747        struct ieee80211_hw *hw;
1748        struct rtw_dev *rtwdev;
1749        int drv_data_size;
1750        int ret;
1751
1752        drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
1753        hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
1754        if (!hw) {
1755                dev_err(&pdev->dev, "failed to allocate hw\n");
1756                return -ENOMEM;
1757        }
1758
1759        rtwdev = hw->priv;
1760        rtwdev->hw = hw;
1761        rtwdev->dev = &pdev->dev;
1762        rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
1763        rtwdev->hci.ops = &rtw_pci_ops;
1764        rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
1765
1766        ret = rtw_core_init(rtwdev);
1767        if (ret)
1768                goto err_release_hw;
1769
1770        rtw_dbg(rtwdev, RTW_DBG_PCI,
1771                "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
1772                pdev->vendor, pdev->device, pdev->revision);
1773
1774        ret = rtw_pci_claim(rtwdev, pdev);
1775        if (ret) {
1776                rtw_err(rtwdev, "failed to claim pci device\n");
1777                goto err_deinit_core;
1778        }
1779
1780        ret = rtw_pci_setup_resource(rtwdev, pdev);
1781        if (ret) {
1782                rtw_err(rtwdev, "failed to setup pci resources\n");
1783                goto err_pci_declaim;
1784        }
1785
1786        rtw_pci_napi_init(rtwdev);
1787
1788        ret = rtw_chip_info_setup(rtwdev);
1789        if (ret) {
1790                rtw_err(rtwdev, "failed to setup chip information\n");
1791                goto err_destroy_pci;
1792        }
1793
1794        dmi_check_system(rtw88_pci_quirks);
1795        rtw_pci_phy_cfg(rtwdev);
1796
1797        ret = rtw_register_hw(rtwdev, hw);
1798        if (ret) {
1799                rtw_err(rtwdev, "failed to register hw\n");
1800                goto err_destroy_pci;
1801        }
1802
1803        ret = rtw_pci_request_irq(rtwdev, pdev);
1804        if (ret) {
1805                ieee80211_unregister_hw(hw);
1806                goto err_destroy_pci;
1807        }
1808
1809        return 0;
1810
1811err_destroy_pci:
1812        rtw_pci_napi_deinit(rtwdev);
1813        rtw_pci_destroy(rtwdev, pdev);
1814
1815err_pci_declaim:
1816        rtw_pci_declaim(rtwdev, pdev);
1817
1818err_deinit_core:
1819        rtw_core_deinit(rtwdev);
1820
1821err_release_hw:
1822        ieee80211_free_hw(hw);
1823
1824        return ret;
1825}
1826EXPORT_SYMBOL(rtw_pci_probe);
1827
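/* Undo rtw_pci_probe(): unregister from mac80211, mask the interrupt,
 * remove NAPI, release the PCI resources and the IRQ, and free the
 * core and the ieee80211_hw.
 */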
1828void rtw_pci_remove(struct pci_dev *pdev)
1829{
1830        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1831        struct rtw_dev *rtwdev;
1832        struct rtw_pci *rtwpci;
1833
1834        if (!hw)
1835                return;
1836
1837        rtwdev = hw->priv;
1838        rtwpci = (struct rtw_pci *)rtwdev->priv;
1839
1840        rtw_unregister_hw(rtwdev, hw);
1841        rtw_pci_disable_interrupt(rtwdev, rtwpci);
1842        rtw_pci_napi_deinit(rtwdev);
1843        rtw_pci_destroy(rtwdev, pdev);
1844        rtw_pci_declaim(rtwdev, pdev);
1845        rtw_pci_free_irq(rtwdev, pdev);
1846        rtw_core_deinit(rtwdev);
1847        ieee80211_free_hw(hw);
1848}
1849EXPORT_SYMBOL(rtw_pci_remove);
1850
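/* Let the chip run its own shutdown hook, if any, then drop to D3hot */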
1851void rtw_pci_shutdown(struct pci_dev *pdev)
1852{
1853        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1854        struct rtw_dev *rtwdev;
1855        struct rtw_chip_info *chip;
1856
1857        if (!hw)
1858                return;
1859
1860        rtwdev = hw->priv;
1861        chip = rtwdev->chip;
1862
1863        if (chip->ops->shutdown)
1864                chip->ops->shutdown(rtwdev);
1865
1866        pci_set_power_state(pdev, PCI_D3hot);
1867}
1868EXPORT_SYMBOL(rtw_pci_shutdown);
1869
1870MODULE_AUTHOR("Realtek Corporation");
1871MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
1872MODULE_LICENSE("Dual BSD/GPL");
1873