/* linux/drivers/net/wireless/realtek/rtw88/pci.c */
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/* Copyright(c) 2018-2019  Realtek Corporation
   3 */
   4
   5#include <linux/module.h>
   6#include <linux/pci.h>
   7#include "main.h"
   8#include "pci.h"
   9#include "tx.h"
  10#include "rx.h"
  11#include "debug.h"
  12
/* Per-queue register holding the TX buffer-descriptor host/hardware
 * indexes.  RTW_TX_QUEUE_BCN has no entry here: the beacon queue is
 * kicked through RTK_PCI_TXBD_BCN_WORK instead of an index register
 * (see rtw_pci_xmit()).
 */
static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};
  22
  23static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
  24{
  25        switch (queue) {
  26        case RTW_TX_QUEUE_BCN:
  27                return TX_DESC_QSEL_BEACON;
  28        case RTW_TX_QUEUE_H2C:
  29                return TX_DESC_QSEL_H2C;
  30        case RTW_TX_QUEUE_MGMT:
  31                return TX_DESC_QSEL_MGMT;
  32        case RTW_TX_QUEUE_HI0:
  33                return TX_DESC_QSEL_HIGH;
  34        default:
  35                return skb->priority;
  36        }
  37};
  38
  39static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
  40{
  41        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  42
  43        return readb(rtwpci->mmap + addr);
  44}
  45
  46static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
  47{
  48        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  49
  50        return readw(rtwpci->mmap + addr);
  51}
  52
  53static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
  54{
  55        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  56
  57        return readl(rtwpci->mmap + addr);
  58}
  59
  60static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
  61{
  62        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  63
  64        writeb(val, rtwpci->mmap + addr);
  65}
  66
  67static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
  68{
  69        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  70
  71        writew(val, rtwpci->mmap + addr);
  72}
  73
  74static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
  75{
  76        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
  77
  78        writel(val, rtwpci->mmap + addr);
  79}
  80
  81static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
  82{
  83        int offset = tx_ring->r.desc_size * idx;
  84
  85        return tx_ring->r.head + offset;
  86}
  87
/* Release a TX ring: unmap and free every skb still linked on the
 * ring's queue, then free the coherent memory backing the descriptor
 * ring itself.  r.head is cleared so the freed ring is detectable.
 */
static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	/* free every skb remained in tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		/* the original mapping in rtw_pci_xmit() covered skb->len */
		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
	}

	/* free the ring itself */
	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}
 113
/* Release an RX ring: unmap and free each receive buffer skb, then
 * free the coherent memory backing the RX buffer descriptors.
 */
static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	dma_addr_t dma;
	u8 *head = rx_ring->r.head;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		/* the DMA address was stashed in skb->cb when mapped
		 * (see rtw_pci_reset_rx_desc())
		 */
		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}

	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}
 138
 139static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
 140{
 141        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 142        struct rtw_pci_tx_ring *tx_ring;
 143        struct rtw_pci_rx_ring *rx_ring;
 144        int i;
 145
 146        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
 147                tx_ring = &rtwpci->tx_rings[i];
 148                rtw_pci_free_tx_ring(rtwdev, tx_ring);
 149        }
 150
 151        for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
 152                rx_ring = &rtwpci->rx_rings[i];
 153                rtw_pci_free_rx_ring(rtwdev, rx_ring);
 154        }
 155}
 156
/* Allocate the zeroed coherent descriptor memory for one TX ring and
 * initialize its software state (skb queue, read/write pointers).
 *
 * Returns 0 on success or -ENOMEM if the DMA allocation fails.
 */
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}
 182
/* Map @skb's buffer for device-to-host DMA and publish it through the
 * RX buffer descriptor at @idx.  The DMA address is remembered in
 * skb->cb so rtw_pci_free_rx_ring()/rx ISR can unmap it later.
 *
 * Returns 0 on success, -EINVAL for a NULL skb, or -EBUSY if the DMA
 * mapping fails.
 */
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}
 208
 209static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
 210                                struct rtw_pci_rx_ring *rx_ring,
 211                                u8 desc_size, u32 len)
 212{
 213        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 214        struct sk_buff *skb = NULL;
 215        dma_addr_t dma;
 216        u8 *head;
 217        int ring_sz = desc_size * len;
 218        int buf_sz = RTK_PCI_RX_BUF_SIZE;
 219        int i, allocated;
 220        int ret = 0;
 221
 222        head = pci_zalloc_consistent(pdev, ring_sz, &dma);
 223        if (!head) {
 224                rtw_err(rtwdev, "failed to allocate rx ring\n");
 225                return -ENOMEM;
 226        }
 227        rx_ring->r.head = head;
 228
 229        for (i = 0; i < len; i++) {
 230                skb = dev_alloc_skb(buf_sz);
 231                if (!skb) {
 232                        allocated = i;
 233                        ret = -ENOMEM;
 234                        goto err_out;
 235                }
 236
 237                memset(skb->data, 0, buf_sz);
 238                rx_ring->buf[i] = skb;
 239                ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
 240                if (ret) {
 241                        allocated = i;
 242                        dev_kfree_skb_any(skb);
 243                        goto err_out;
 244                }
 245        }
 246
 247        rx_ring->r.dma = dma;
 248        rx_ring->r.len = len;
 249        rx_ring->r.desc_size = desc_size;
 250        rx_ring->r.wp = 0;
 251        rx_ring->r.rp = 0;
 252
 253        return 0;
 254
 255err_out:
 256        for (i = 0; i < allocated; i++) {
 257                skb = rx_ring->buf[i];
 258                if (!skb)
 259                        continue;
 260                dma = *((dma_addr_t *)skb->cb);
 261                pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
 262                dev_kfree_skb_any(skb);
 263                rx_ring->buf[i] = NULL;
 264        }
 265        pci_free_consistent(pdev, ring_sz, head, dma);
 266
 267        rtw_err(rtwdev, "failed to init rx buffer\n");
 268
 269        return ret;
 270}
 271
/* Allocate every TX ring and every RX ring used by the PCI transport.
 *
 * On failure, only the rings that were successfully initialized are
 * torn down again: the loop counters `i` and `j` intentionally hold
 * the number of completed TX and RX rings when we jump to `out`.
 */
static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	/* free only the rings set up before the failure */
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}
 320
/* Release the resources acquired by rtw_pci_init(). */
static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}
 325
 326static int rtw_pci_init(struct rtw_dev *rtwdev)
 327{
 328        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 329        int ret = 0;
 330
 331        rtwpci->irq_mask[0] = IMR_HIGHDOK |
 332                              IMR_MGNTDOK |
 333                              IMR_BKDOK |
 334                              IMR_BEDOK |
 335                              IMR_VIDOK |
 336                              IMR_VODOK |
 337                              IMR_ROK |
 338                              IMR_BCNDMAINT_E |
 339                              0;
 340        rtwpci->irq_mask[1] = IMR_TXFOVW |
 341                              0;
 342        rtwpci->irq_mask[3] = IMR_H2CDOK |
 343                              0;
 344        spin_lock_init(&rtwpci->irq_lock);
 345        ret = rtw_pci_init_trx_ring(rtwdev);
 346
 347        return ret;
 348}
 349
 350static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
 351{
 352        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 353        u32 len;
 354        u8 tmp;
 355        dma_addr_t dma;
 356
 357        tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
 358        rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
 359
 360        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
 361        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
 362
 363        len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
 364        dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
 365        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
 366        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
 367        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
 368        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
 369
 370        len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
 371        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
 372        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
 373        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
 374        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
 375        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
 376
 377        len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
 378        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
 379        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
 380        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
 381        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
 382        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
 383
 384        len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
 385        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
 386        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
 387        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
 388        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
 389        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
 390
 391        len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
 392        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
 393        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
 394        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
 395        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
 396        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
 397
 398        len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
 399        dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
 400        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
 401        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
 402        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
 403        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
 404
 405        len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
 406        dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
 407        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
 408        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
 409        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
 410        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
 411
 412        len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
 413        dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
 414        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
 415        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
 416        rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
 417        rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
 418
 419        /* reset read/write point */
 420        rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
 421
 422        /* rest H2C Queue index */
 423        rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
 424        rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
 425}
 426
/* Reset all TX/RX buffer-descriptor state; thin wrapper kept so the
 * setup path reads symmetrically with init/deinit.
 */
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}
 431
/* Unmask the interrupt sources selected in rtw_pci_init() by writing
 * the cached masks to HIMR0/1/3.  Caller holds rtwpci->irq_lock.
 */
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
	rtwpci->irq_enabled = true;
}
 440
/* Mask every interrupt source (HIMR0/1/3 = 0).  Caller holds
 * rtwpci->irq_lock; the ISR checks irq_enabled before touching HW.
 */
static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
	rtwpci->irq_enabled = false;
}
 449
/* Transport "setup" hook: re-program the ring registers after the
 * chip has been powered on.  Always succeeds.
 */
static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_trx_ring(rtwdev);

	return 0;
}
 456
/* Reset the TRX DMA engine, enable RX tagging, and restart the
 * software-side tag counter used by rtw_pci_dma_check().
 */
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}
 464
 465static int rtw_pci_start(struct rtw_dev *rtwdev)
 466{
 467        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 468        unsigned long flags;
 469
 470        rtw_pci_dma_reset(rtwdev, rtwpci);
 471
 472        spin_lock_irqsave(&rtwpci->irq_lock, flags);
 473        rtw_pci_enable_interrupt(rtwdev, rtwpci);
 474        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 475
 476        return 0;
 477}
 478
 479static void rtw_pci_stop(struct rtw_dev *rtwdev)
 480{
 481        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 482        unsigned long flags;
 483
 484        spin_lock_irqsave(&rtwpci->irq_lock, flags);
 485        rtw_pci_disable_interrupt(rtwdev, rtwpci);
 486        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 487}
 488
/* mac80211 AC index (0=VO, 1=VI, 2=BE, 3=BK) -> hardware TX queue */
static u8 ac_to_hwq[] = {
	[0] = RTW_TX_QUEUE_VO,
	[1] = RTW_TX_QUEUE_VI,
	[2] = RTW_TX_QUEUE_BE,
	[3] = RTW_TX_QUEUE_BK,
};
 495
 496static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
 497{
 498        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 499        __le16 fc = hdr->frame_control;
 500        u8 q_mapping = skb_get_queue_mapping(skb);
 501        u8 queue;
 502
 503        if (unlikely(ieee80211_is_beacon(fc)))
 504                queue = RTW_TX_QUEUE_BCN;
 505        else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
 506                queue = RTW_TX_QUEUE_MGMT;
 507        else
 508                queue = ac_to_hwq[q_mapping];
 509
 510        return queue;
 511}
 512
 513static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
 514                                      struct rtw_pci_tx_ring *ring)
 515{
 516        struct sk_buff *prev = skb_dequeue(&ring->queue);
 517        struct rtw_pci_tx_data *tx_data;
 518        dma_addr_t dma;
 519
 520        if (!prev)
 521                return;
 522
 523        tx_data = rtw_pci_get_tx_data(prev);
 524        dma = tx_data->dma;
 525        pci_unmap_single(rtwpci->pdev, dma, prev->len,
 526                         PCI_DMA_TODEVICE);
 527        dev_kfree_skb_any(prev);
 528}
 529
/* Sanity-check DMA progress on the RX buffer descriptor at @idx.
 *
 * NOTE(review): the value compared against rtwpci->rx_tag is read from
 * the descriptor's total_pkt_size field; presumably the hardware
 * reports the running RX tag there when BIT_RX_TAG_EN is set (see
 * rtw_pci_dma_reset()) — confirm against the descriptor layout.
 */
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	/* the tag advances by one per received buffer, modulo RX_TAG_MAX */
	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}
 550
/* Build the TX buffer-descriptor pair for @skb and hand it to hardware.
 *
 * The packet descriptor (tx_pkt_desc_sz bytes) is pushed in front of
 * the payload and both are covered by one DMA mapping: buf_desc[0]
 * points at the descriptor, buf_desc[1] at the payload that follows.
 * Beacon frames recycle a single reserved-page slot (the previous one
 * is released first) and are kicked through RTK_PCI_TXBD_BCN_WORK;
 * every other queue advances its write-pointer register instead.
 *
 * Returns 0 on success, -ENOSPC when the ring has no free descriptor,
 * or -EBUSY when the DMA mapping fails.  The skb is NOT freed on
 * error; the caller owns it until it is queued here.
 */
static int rtw_pci_xmit(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];

	/* payload length before the packet descriptor is pushed */
	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtwpci->pdev, dma))
		return -EBUSY;

	/* after this we got dma mapped, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	/* psb_len: total length in 128-byte units, rounded up */
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	/* remember mapping and sequence number for the TX-done ISR */
	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;
	skb_queue_tail(&ring->queue, skb);

	/* kick off tx queue */
	if (queue != RTW_TX_QUEUE_BCN) {
		if (++ring->r.wp >= ring->r.len)
			ring->r.wp = 0;
		bd_idx = rtw_pci_tx_queue_idx_addr[queue];
		rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
	} else {
		u32 reg_bcn_work;

		reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
		reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
		rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
	}

	return 0;
}
 620
 621static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
 622                                        u32 size)
 623{
 624        struct sk_buff *skb;
 625        struct rtw_tx_pkt_info pkt_info;
 626        u32 tx_pkt_desc_sz;
 627        u32 length;
 628
 629        tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
 630        length = size + tx_pkt_desc_sz;
 631        skb = dev_alloc_skb(length);
 632        if (!skb)
 633                return -ENOMEM;
 634
 635        skb_reserve(skb, tx_pkt_desc_sz);
 636        memcpy((u8 *)skb_put(skb, size), buf, size);
 637        memset(&pkt_info, 0, sizeof(pkt_info));
 638        pkt_info.tx_pkt_size = size;
 639        pkt_info.offset = tx_pkt_desc_sz;
 640
 641        return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
 642}
 643
 644static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
 645{
 646        struct sk_buff *skb;
 647        struct rtw_tx_pkt_info pkt_info;
 648        u32 tx_pkt_desc_sz;
 649        u32 length;
 650
 651        tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
 652        length = size + tx_pkt_desc_sz;
 653        skb = dev_alloc_skb(length);
 654        if (!skb)
 655                return -ENOMEM;
 656
 657        skb_reserve(skb, tx_pkt_desc_sz);
 658        memcpy((u8 *)skb_put(skb, size), buf, size);
 659        memset(&pkt_info, 0, sizeof(pkt_info));
 660        pkt_info.tx_pkt_size = size;
 661
 662        return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
 663}
 664
/* mac80211 TX entry point for the PCI transport.  Maps the frame to a
 * hardware queue, transmits it, and stops the corresponding mac80211
 * queue early when fewer than two descriptors remain so the next
 * frame cannot overrun the ring (re-woken in rtw_pci_tx_isr()).
 */
static int rtw_pci_tx(struct rtw_dev *rtwdev,
		      struct rtw_tx_pkt_info *pkt_info,
		      struct sk_buff *skb)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u8 queue = rtw_hw_queue_mapping(skb);
	int ret;

	ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}

	return 0;
}
 686
 687static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 688                           u8 hw_queue)
 689{
 690        struct ieee80211_hw *hw = rtwdev->hw;
 691        struct ieee80211_tx_info *info;
 692        struct rtw_pci_tx_ring *ring;
 693        struct rtw_pci_tx_data *tx_data;
 694        struct sk_buff *skb;
 695        u32 count;
 696        u32 bd_idx_addr;
 697        u32 bd_idx, cur_rp;
 698        u16 q_map;
 699
 700        ring = &rtwpci->tx_rings[hw_queue];
 701
 702        bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
 703        bd_idx = rtw_read32(rtwdev, bd_idx_addr);
 704        cur_rp = bd_idx >> 16;
 705        cur_rp &= 0xfff;
 706        if (cur_rp >= ring->r.rp)
 707                count = cur_rp - ring->r.rp;
 708        else
 709                count = ring->r.len - (ring->r.rp - cur_rp);
 710
 711        while (count--) {
 712                skb = skb_dequeue(&ring->queue);
 713                tx_data = rtw_pci_get_tx_data(skb);
 714                pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
 715                                 PCI_DMA_TODEVICE);
 716
 717                /* just free command packets from host to card */
 718                if (hw_queue == RTW_TX_QUEUE_H2C) {
 719                        dev_kfree_skb_irq(skb);
 720                        continue;
 721                }
 722
 723                if (ring->queue_stopped &&
 724                    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
 725                        q_map = skb_get_queue_mapping(skb);
 726                        ieee80211_wake_queue(hw, q_map);
 727                        ring->queue_stopped = false;
 728                }
 729
 730                skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
 731
 732                info = IEEE80211_SKB_CB(skb);
 733
 734                /* enqueue to wait for tx report */
 735                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
 736                        rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
 737                        continue;
 738                }
 739
 740                /* always ACK for others, then they won't be marked as drop */
 741                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 742                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 743                else
 744                        info->flags |= IEEE80211_TX_STAT_ACK;
 745
 746                ieee80211_tx_info_clear_status(info);
 747                ieee80211_tx_status_irqsafe(hw, skb);
 748        }
 749
 750        ring->r.rp = cur_rp;
 751}
 752
/* RX interrupt service for the MPDU queue.
 *
 * Computes how many buffers the hardware filled since the last pass
 * (from the hardware write pointer in RTK_PCI_RXBD_IDX_MPDUQ), then
 * for each: unmaps the buffer, parses the RX descriptor, routes C2H
 * frames to the c2h work queue and everything else to mac80211, and
 * finally rearms the ring slot with a fresh skb.
 *
 * Runs in hard-irq context under rtwpci->irq_lock.
 */
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_ring *ring;
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_wp, cur_rp, tmp;
	u32 count;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u8 *rx_desc;
	dma_addr_t dma;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

	/* hardware write pointer lives in bits [27:16] */
	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = tmp >> 16;
	cur_wp &= 0xfff;
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	cur_rp = ring->r.rp;
	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		/* DMA address was stashed in cb by rtw_pci_reset_rx_desc() */
		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
				 PCI_DMA_FROMDEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		if (pkt_stat.is_c2h) {
			/* keep rx_desc, halmac needs it */
			skb_put(skb, pkt_stat.pkt_len + pkt_offset);

			/* pass offset for further operation */
			*((u32 *)skb->cb) = pkt_offset;
			skb_queue_tail(&rtwdev->c2h_queue, skb);
			ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
		} else {
			/* remove rx_desc, maybe use skb_pull? */
			skb_put(skb, pkt_stat.pkt_len);
			skb_reserve(skb, pkt_offset);

			/* alloc a smaller skb to mac80211 */
			new = dev_alloc_skb(pkt_stat.pkt_len);
			if (!new) {
				/* fall back to handing up the big rx buffer */
				new = skb;
			} else {
				skb_put_data(new, skb->data, skb->len);
				dev_kfree_skb_any(skb);
			}
			/* TODO: merge into rx.c */
			rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(rtwdev->hw, new);
		}

		/* skb delivered to mac80211, alloc a new one in rx ring */
		new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
		if (WARN(!new, "rx routine starvation\n"))
			return;

		ring->buf[cur_rp] = new;
		rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	ring->r.wp = cur_wp;
	/* tell hardware how far the host has consumed */
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}
 837
/* Read the three interrupt status registers, keep only the sources we
 * enabled, and write the result back to acknowledge them (presumably
 * HISR is write-1-to-clear — confirm against the register map).
 * irq_status[2] is never populated; index 3 mirrors HISR3/HIMR3.
 */
static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
}
 851
 852static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
 853{
 854        struct rtw_dev *rtwdev = dev;
 855        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 856        u32 irq_status[4];
 857
 858        spin_lock(&rtwpci->irq_lock);
 859        if (!rtwpci->irq_enabled)
 860                goto out;
 861
 862        rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
 863
 864        if (irq_status[0] & IMR_MGNTDOK)
 865                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
 866        if (irq_status[0] & IMR_HIGHDOK)
 867                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
 868        if (irq_status[0] & IMR_BEDOK)
 869                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
 870        if (irq_status[0] & IMR_BKDOK)
 871                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
 872        if (irq_status[0] & IMR_VODOK)
 873                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
 874        if (irq_status[0] & IMR_VIDOK)
 875                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
 876        if (irq_status[3] & IMR_H2CDOK)
 877                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
 878        if (irq_status[0] & IMR_ROK)
 879                rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
 880
 881out:
 882        spin_unlock(&rtwpci->irq_lock);
 883
 884        return IRQ_HANDLED;
 885}
 886
 887static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
 888                              struct pci_dev *pdev)
 889{
 890        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 891        unsigned long len;
 892        u8 bar_id = 2;
 893        int ret;
 894
 895        ret = pci_request_regions(pdev, KBUILD_MODNAME);
 896        if (ret) {
 897                rtw_err(rtwdev, "failed to request pci regions\n");
 898                return ret;
 899        }
 900
 901        len = pci_resource_len(pdev, bar_id);
 902        rtwpci->mmap = pci_iomap(pdev, bar_id, len);
 903        if (!rtwpci->mmap) {
 904                rtw_err(rtwdev, "failed to map pci memory\n");
 905                return -ENOMEM;
 906        }
 907
 908        return 0;
 909}
 910
 911static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
 912                                 struct pci_dev *pdev)
 913{
 914        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 915
 916        if (rtwpci->mmap) {
 917                pci_iounmap(pdev, rtwpci->mmap);
 918                pci_release_regions(pdev);
 919        }
 920}
 921
 922static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
 923{
 924        u16 write_addr;
 925        u16 remainder = addr & 0x3;
 926        u8 flag;
 927        u8 cnt = 20;
 928
 929        write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
 930        rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
 931        rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
 932        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);
 933
 934        flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
 935        while (flag && (cnt != 0)) {
 936                udelay(10);
 937                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
 938                cnt--;
 939        }
 940
 941        WARN(flag, "DBI write fail\n");
 942}
 943
 944static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
 945{
 946        u8 page;
 947        u8 wflag;
 948        u8 cnt;
 949
 950        rtw_write16(rtwdev, REG_MDIO_V1, data);
 951
 952        page = addr < 0x20 ? 0 : 1;
 953        page += g1 ? 0 : 2;
 954        rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
 955        rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
 956
 957        rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
 958        wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);
 959
 960        cnt = 20;
 961        while (wflag && (cnt != 0)) {
 962                udelay(10);
 963                wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
 964                                        BIT_MDIO_WFLAG_V1);
 965                cnt--;
 966        }
 967
 968        WARN(wflag, "MDIO write fail\n");
 969}
 970
 971static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
 972{
 973        struct rtw_chip_info *chip = rtwdev->chip;
 974        struct rtw_intf_phy_para *para;
 975        u16 cut;
 976        u16 value;
 977        u16 offset;
 978        u16 ip_sel;
 979        int i;
 980
 981        cut = BIT(0) << rtwdev->hal.cut_version;
 982
 983        for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
 984                para = &chip->intf_table->gen1_para[i];
 985                if (!(para->cut_mask & cut))
 986                        continue;
 987                if (para->offset == 0xffff)
 988                        break;
 989                offset = para->offset;
 990                value = para->value;
 991                ip_sel = para->ip_sel;
 992                if (para->ip_sel == RTW_IP_SEL_PHY)
 993                        rtw_mdio_write(rtwdev, offset, value, true);
 994                else
 995                        rtw_dbi_write8(rtwdev, offset, value);
 996        }
 997
 998        for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
 999                para = &chip->intf_table->gen2_para[i];
1000                if (!(para->cut_mask & cut))
1001                        continue;
1002                if (para->offset == 0xffff)
1003                        break;
1004                offset = para->offset;
1005                value = para->value;
1006                ip_sel = para->ip_sel;
1007                if (para->ip_sel == RTW_IP_SEL_PHY)
1008                        rtw_mdio_write(rtwdev, offset, value, false);
1009                else
1010                        rtw_dbi_write8(rtwdev, offset, value);
1011        }
1012}
1013
1014static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1015{
1016        int ret;
1017
1018        ret = pci_enable_device(pdev);
1019        if (ret) {
1020                rtw_err(rtwdev, "failed to enable pci device\n");
1021                return ret;
1022        }
1023
1024        pci_set_master(pdev);
1025        pci_set_drvdata(pdev, rtwdev->hw);
1026        SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1027
1028        return 0;
1029}
1030
/* Reverse of rtw_pci_claim(): stop the device from bus mastering and
 * disable it. drvdata is left in place; the caller frees the hw.
 */
static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        pci_clear_master(pdev);
        pci_disable_device(pdev);
}
1036
1037static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1038{
1039        struct rtw_pci *rtwpci;
1040        int ret;
1041
1042        rtwpci = (struct rtw_pci *)rtwdev->priv;
1043        rtwpci->pdev = pdev;
1044
1045        /* after this driver can access to hw registers */
1046        ret = rtw_pci_io_mapping(rtwdev, pdev);
1047        if (ret) {
1048                rtw_err(rtwdev, "failed to request pci io region\n");
1049                goto err_out;
1050        }
1051
1052        ret = rtw_pci_init(rtwdev);
1053        if (ret) {
1054                rtw_err(rtwdev, "failed to allocate pci resources\n");
1055                goto err_io_unmap;
1056        }
1057
1058        rtw_pci_phy_cfg(rtwdev);
1059
1060        return 0;
1061
1062err_io_unmap:
1063        rtw_pci_io_unmapping(rtwdev, pdev);
1064
1065err_out:
1066        return ret;
1067}
1068
/* Reverse of rtw_pci_setup_resource(): release the PCI ring resources
 * first (rtw_pci_deinit), then tear down the BAR mapping.
 */
static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        rtw_pci_deinit(rtwdev);
        rtw_pci_io_unmapping(rtwdev, pdev);
}
1074
/* HCI callbacks through which the rtw88 core drives the PCI transport:
 * datapath entry points, lifecycle hooks, MMIO register accessors and
 * the reserved-page/H2C writers.
 */
static struct rtw_hci_ops rtw_pci_ops = {
        .tx = rtw_pci_tx,
        .setup = rtw_pci_setup,
        .start = rtw_pci_start,
        .stop = rtw_pci_stop,

        /* direct MMIO access via the mapped BAR (see rtw_pci_read8 etc.) */
        .read8 = rtw_pci_read8,
        .read16 = rtw_pci_read16,
        .read32 = rtw_pci_read32,
        .write8 = rtw_pci_write8,
        .write16 = rtw_pci_write16,
        .write32 = rtw_pci_write32,
        .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
        .write_data_h2c = rtw_pci_write_data_h2c,
};
1090
/* Probe one supported Realtek PCI wifi device.
 *
 * Allocates the mac80211 hw with room for rtw_dev (the rtw_pci private
 * data lives in rtwdev->priv), initializes the core, claims and maps
 * the PCI device, reads chip information, registers with mac80211 and
 * finally requests the (shared) interrupt line. Failures unwind each
 * step in reverse order via the goto chain.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int rtw_pci_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        struct ieee80211_hw *hw;
        struct rtw_dev *rtwdev;
        int drv_data_size;
        int ret;

        /* hw->priv holds rtw_dev immediately followed by rtw_pci */
        drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
        hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
        if (!hw) {
                dev_err(&pdev->dev, "failed to allocate hw\n");
                return -ENOMEM;
        }

        rtwdev = hw->priv;
        rtwdev->hw = hw;
        rtwdev->dev = &pdev->dev;
        /* chip info was stashed in the id table's driver_data */
        rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
        rtwdev->hci.ops = &rtw_pci_ops;
        rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

        ret = rtw_core_init(rtwdev);
        if (ret)
                goto err_release_hw;

        rtw_dbg(rtwdev, RTW_DBG_PCI,
                "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
                pdev->vendor, pdev->device, pdev->revision);

        ret = rtw_pci_claim(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to claim pci device\n");
                goto err_deinit_core;
        }

        ret = rtw_pci_setup_resource(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to setup pci resources\n");
                goto err_pci_declaim;
        }

        ret = rtw_chip_info_setup(rtwdev);
        if (ret) {
                rtw_err(rtwdev, "failed to setup chip information\n");
                goto err_destroy_pci;
        }

        ret = rtw_register_hw(rtwdev, hw);
        if (ret) {
                rtw_err(rtwdev, "failed to register hw\n");
                goto err_destroy_pci;
        }

        /* IRQ is hooked up last; on failure the hw registration just
         * above is undone inline before joining the common unwind path.
         */
        ret = request_irq(pdev->irq, &rtw_pci_interrupt_handler,
                          IRQF_SHARED, KBUILD_MODNAME, rtwdev);
        if (ret) {
                ieee80211_unregister_hw(hw);
                goto err_destroy_pci;
        }

        return 0;

err_destroy_pci:
        rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
        rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
        rtw_core_deinit(rtwdev);

err_release_hw:
        ieee80211_free_hw(hw);

        return ret;
}
1168
/* PCI ->remove: tear down in (roughly) reverse order of probe —
 * unregister from mac80211, mask our interrupt sources, release the
 * ring/BAR resources, disable the device, free the IRQ and finally
 * deinit the core and free the hw.
 */
static void rtw_pci_remove(struct pci_dev *pdev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct rtw_dev *rtwdev;
        struct rtw_pci *rtwpci;

        if (!hw)
                return;

        rtwdev = hw->priv;
        rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtw_unregister_hw(rtwdev, hw);
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
        rtw_pci_destroy(rtwdev, pdev);
        rtw_pci_declaim(rtwdev, pdev);
        /* NOTE(review): free_irq() runs after the BAR is unmapped and the
         * device disabled; a late shared-line interrupt presumably bails
         * on the irq_enabled check in the handler — confirm that
         * rtw_pci_disable_interrupt() clears irq_enabled.
         */
        free_irq(rtwpci->pdev->irq, rtwdev);
        rtw_core_deinit(rtwdev);
        ieee80211_free_hw(hw);
}
1189
/* PCI IDs served by this driver. driver_data carries the chip info
 * consumed in rtw_pci_probe(); entries are compiled in only when the
 * matching chip support is configured.
 */
static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
        { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
        { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
        {},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);
1200
/* Driver registration: module_pci_driver() generates the module
 * init/exit pair that registers/unregisters this pci_driver.
 */
static struct pci_driver rtw_pci_driver = {
        .name = "rtw_pci",
        .id_table = rtw_pci_id_table,
        .probe = rtw_pci_probe,
        .remove = rtw_pci_remove,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");
1212