linux/drivers/net/wireless/realtek/rtw88/pci.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "tx.h"
#include "rx.h"
#include "debug.h"

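/* Per-queue TXBD index registers, indexed by RTW_TX_QUEUE_*. The beacon
 * queue has no entry here; it is kicked through RTK_PCI_TXBD_BCN_WORK
 * instead of a read/write index pair (see rtw_pci_xmit()).
 */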
static u32 rtw_pci_tx_queue_idx_addr[] = {
        [RTW_TX_QUEUE_BK]       = RTK_PCI_TXBD_IDX_BKQ,
        [RTW_TX_QUEUE_BE]       = RTK_PCI_TXBD_IDX_BEQ,
        [RTW_TX_QUEUE_VI]       = RTK_PCI_TXBD_IDX_VIQ,
        [RTW_TX_QUEUE_VO]       = RTK_PCI_TXBD_IDX_VOQ,
        [RTW_TX_QUEUE_MGMT]     = RTK_PCI_TXBD_IDX_MGMTQ,
        [RTW_TX_QUEUE_HI0]      = RTK_PCI_TXBD_IDX_HI0Q,
        [RTW_TX_QUEUE_H2C]      = RTK_PCI_TXBD_IDX_H2CQ,
};

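/* Map a TX queue to the queue selection (qsel) field of the TX
 * descriptor; data queues (BK/BE/VI/VO) use the skb priority directly.
 */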
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
        switch (queue) {
        case RTW_TX_QUEUE_BCN:
                return TX_DESC_QSEL_BEACON;
        case RTW_TX_QUEUE_H2C:
                return TX_DESC_QSEL_H2C;
        case RTW_TX_QUEUE_MGMT:
                return TX_DESC_QSEL_MGMT;
        case RTW_TX_QUEUE_HI0:
                return TX_DESC_QSEL_HIGH;
        default:
                return skb->priority;
        }
}

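/* MMIO register accessors; rtwpci->mmap is set up by rtw_pci_io_mapping() */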
static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writel(val, rtwpci->mmap + addr);
}

static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
        int offset = tx_ring->r.desc_size * idx;

        return tx_ring->r.head + offset;
}

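/* Unmap and free any skbs still queued on a TX ring, then release the
 * coherent memory holding the buffer descriptors themselves.
 */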
static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_tx_ring *tx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb, *tmp;
        dma_addr_t dma;
        u8 *head = tx_ring->r.head;
        u32 len = tx_ring->r.len;
        int ring_sz = len * tx_ring->r.desc_size;

        /* free every skb remaining in the tx list */
        skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
                __skb_unlink(skb, &tx_ring->queue);
                tx_data = rtw_pci_get_tx_data(skb);
                dma = tx_data->dma;

                pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb_any(skb);
        }

        /* free the ring itself */
        pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
        tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_rx_ring *rx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb;
        dma_addr_t dma;
        u8 *head = rx_ring->r.head;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
        int i;

        for (i = 0; i < rx_ring->r.len; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;

                dma = *((dma_addr_t *)skb->cb);
                pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
                dev_kfree_skb(skb);
                rx_ring->buf[i] = NULL;
        }

        pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        int i;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
                rx_ring = &rtwpci->rx_rings[i];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }
}

static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_tx_ring *tx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        int ring_sz = desc_size * len;
        dma_addr_t dma;
        u8 *head;

        head = pci_zalloc_consistent(pdev, ring_sz, &dma);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate tx ring\n");
                return -ENOMEM;
        }

        skb_queue_head_init(&tx_ring->queue);
        tx_ring->r.head = head;
        tx_ring->r.dma = dma;
        tx_ring->r.len = len;
        tx_ring->r.desc_size = desc_size;
        tx_ring->r.wp = 0;
        tx_ring->r.rp = 0;

        return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
                                 struct rtw_pci_rx_ring *rx_ring,
                                 u32 idx, u32 desc_sz)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        dma_addr_t dma;

        if (!skb)
                return -EINVAL;

        dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, dma))
                return -EBUSY;

        *((dma_addr_t *)skb->cb) = dma;
        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);

        return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
                                        struct rtw_pci_rx_ring *rx_ring,
                                        u32 idx, u32 desc_sz)
{
        struct device *dev = rtwdev->dev;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;

        dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);
}

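/* Allocate the RX buffer descriptor ring and one receive skb per entry;
 * on failure, unwind whatever buffers were mapped so far.
 */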
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_rx_ring *rx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb = NULL;
        dma_addr_t dma;
        u8 *head;
        int ring_sz = desc_size * len;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        int i, allocated;
        int ret = 0;

        head = pci_zalloc_consistent(pdev, ring_sz, &dma);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate rx ring\n");
                return -ENOMEM;
        }
        rx_ring->r.head = head;

        for (i = 0; i < len; i++) {
                skb = dev_alloc_skb(buf_sz);
                if (!skb) {
                        allocated = i;
                        ret = -ENOMEM;
                        goto err_out;
                }

                memset(skb->data, 0, buf_sz);
                rx_ring->buf[i] = skb;
                ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
                if (ret) {
                        allocated = i;
                        dev_kfree_skb_any(skb);
                        goto err_out;
                }
        }

        rx_ring->r.dma = dma;
        rx_ring->r.len = len;
        rx_ring->r.desc_size = desc_size;
        rx_ring->r.wp = 0;
        rx_ring->r.rp = 0;

        return 0;

err_out:
        for (i = 0; i < allocated; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;
                dma = *((dma_addr_t *)skb->cb);
                pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                rx_ring->buf[i] = NULL;
        }
        pci_free_consistent(pdev, ring_sz, head, dma);

        rtw_err(rtwdev, "failed to init rx buffer\n");

        return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        struct rtw_chip_info *chip = rtwdev->chip;
        int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
        int tx_desc_size, rx_desc_size;
        u32 len;
        int ret;

        tx_desc_size = chip->tx_buf_desc_sz;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                len = max_num_of_tx_queue(i);
                ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
                if (ret)
                        goto out;
        }

        rx_desc_size = chip->rx_buf_desc_sz;

        for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
                                           RTK_MAX_RX_DESC_NUM);
                if (ret)
                        goto out;
        }

        return 0;

out:
        tx_alloced = i;
        for (i = 0; i < tx_alloced; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        rx_alloced = j;
        for (j = 0; j < rx_alloced; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }

        return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
        rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        int ret = 0;

        rtwpci->irq_mask[0] = IMR_HIGHDOK |
                              IMR_MGNTDOK |
                              IMR_BKDOK |
                              IMR_BEDOK |
                              IMR_VIDOK |
                              IMR_VODOK |
                              IMR_ROK |
                              IMR_BCNDMAINT_E |
                              0;
        rtwpci->irq_mask[1] = IMR_TXFOVW |
                              0;
        rtwpci->irq_mask[3] = IMR_H2CDOK |
                              0;
        spin_lock_init(&rtwpci->irq_lock);
        ret = rtw_pci_init_trx_ring(rtwdev);

        return ret;
}

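/* Program the hardware with the DMA base addresses and lengths of every
 * TX/RX buffer descriptor ring, and reset the host/hardware indexes.
 */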
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u32 len;
        u8 tmp;
        dma_addr_t dma;

        tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
        rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

        len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
        dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
        rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

        /* reset read/write pointers */
        rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

        /* reset H2C queue index */
        rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
        rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
        rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
                                     struct rtw_pci *rtwpci)
{
        rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
        rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
        rtwpci->irq_enabled = true;
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
                                      struct rtw_pci *rtwpci)
{
        rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
        rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
        rtwpci->irq_enabled = false;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
        rtw_pci_reset_trx_ring(rtwdev);

        return 0;
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
        /* reset dma and rx tag */
        rtw_write32_set(rtwdev, RTK_PCI_CTRL,
                        BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
        rtwpci->rx_tag = 0;
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long flags;

        rtw_pci_dma_reset(rtwdev, rtwpci);

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw_pci_enable_interrupt(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

        return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static u8 ac_to_hwq[] = {
        [0] = RTW_TX_QUEUE_VO,
        [1] = RTW_TX_QUEUE_VI,
        [2] = RTW_TX_QUEUE_BE,
        [3] = RTW_TX_QUEUE_BK,
};

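/* Map an skb to a hardware TX queue: beacons and management/control
 * frames get dedicated queues, data frames follow the AC mapping above.
 */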
static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        __le16 fc = hdr->frame_control;
        u8 q_mapping = skb_get_queue_mapping(skb);
        u8 queue;

        if (unlikely(ieee80211_is_beacon(fc)))
                queue = RTW_TX_QUEUE_BCN;
        else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
                queue = RTW_TX_QUEUE_MGMT;
        else
                queue = ac_to_hwq[q_mapping];

        return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
                                      struct rtw_pci_tx_ring *ring)
{
        struct sk_buff *prev = skb_dequeue(&ring->queue);
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;

        if (!prev)
                return;

        tx_data = rtw_pci_get_tx_data(prev);
        dma = tx_data->dma;
        pci_unmap_single(rtwpci->pdev, dma, prev->len,
                         PCI_DMA_TODEVICE);
        dev_kfree_skb_any(prev);
}

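/* With BIT_RX_TAG_EN set (see rtw_pci_dma_reset()), the hardware is
 * expected to write an incrementing tag into the total_pkt_size field of
 * each RX buffer descriptor; a mismatch with the driver's own counter
 * suggests a DMA problem on the PCI bus.
 */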
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
                              struct rtw_pci_rx_ring *rx_ring,
                              u32 idx)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        u32 desc_sz = chip->rx_buf_desc_sz;
        u16 total_pkt_size;

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

        /* rx tag mismatch, throw a warning */
        if (total_pkt_size != rtwpci->rx_tag)
                rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

        rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

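/* Fill the TX descriptor in front of the packet, map the whole skb for
 * DMA, and split it across two buffer descriptor segments: one for the
 * packet descriptor and one for the frame body. Non-beacon queues are
 * kicked by writing the new write pointer to the queue's TXBD index
 * register; the beacon queue is kicked via RTK_PCI_TXBD_BCN_WORK.
 */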
static int rtw_pci_xmit(struct rtw_dev *rtwdev,
                        struct rtw_tx_pkt_info *pkt_info,
                        struct sk_buff *skb, u8 queue)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;
        u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
        u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
        u32 size;
        u32 psb_len;
        u8 *pkt_desc;
        struct rtw_pci_tx_buffer_desc *buf_desc;
        u32 bd_idx;

        ring = &rtwpci->tx_rings[queue];

        size = skb->len;

        if (queue == RTW_TX_QUEUE_BCN)
                rtw_pci_release_rsvd_page(rtwpci, ring);
        else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
                return -ENOSPC;

        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
        memset(pkt_desc, 0, tx_pkt_desc_sz);
        pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
        rtw_tx_fill_tx_desc(pkt_info, skb);
        dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
                             PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(rtwpci->pdev, dma))
                return -EBUSY;

        /* after this point the dma is mapped; there is no way back */
        buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
        memset(buf_desc, 0, tx_buf_desc_sz);
        psb_len = (skb->len - 1) / 128 + 1;
        if (queue == RTW_TX_QUEUE_BCN)
                psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

        buf_desc[0].psb_len = cpu_to_le16(psb_len);
        buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
        buf_desc[0].dma = cpu_to_le32(dma);
        buf_desc[1].buf_size = cpu_to_le16(size);
        buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

        tx_data = rtw_pci_get_tx_data(skb);
        tx_data->dma = dma;
        tx_data->sn = pkt_info->sn;
        skb_queue_tail(&ring->queue, skb);

        /* kick off tx queue */
        if (queue != RTW_TX_QUEUE_BCN) {
                if (++ring->r.wp >= ring->r.len)
                        ring->r.wp = 0;
                bd_idx = rtw_pci_tx_queue_idx_addr[queue];
                rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
        } else {
                u32 reg_bcn_work;

                reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
                reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
                rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
        }

        return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
                                        u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info;
        u32 tx_pkt_desc_sz;
        u32 length;

        tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
        length = size + tx_pkt_desc_sz;
        skb = dev_alloc_skb(length);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, tx_pkt_desc_sz);
        memcpy((u8 *)skb_put(skb, size), buf, size);
        memset(&pkt_info, 0, sizeof(pkt_info));
        pkt_info.tx_pkt_size = size;
        pkt_info.offset = tx_pkt_desc_sz;

        return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info;
        u32 tx_pkt_desc_sz;
        u32 length;

        tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
        length = size + tx_pkt_desc_sz;
        skb = dev_alloc_skb(length);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, tx_pkt_desc_sz);
        memcpy((u8 *)skb_put(skb, size), buf, size);
        memset(&pkt_info, 0, sizeof(pkt_info));
        pkt_info.tx_pkt_size = size;

        return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}

static int rtw_pci_tx(struct rtw_dev *rtwdev,
                      struct rtw_tx_pkt_info *pkt_info,
                      struct sk_buff *skb)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring;
        u8 queue = rtw_hw_queue_mapping(skb);
        int ret;

        ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
        if (ret)
                return ret;

        ring = &rtwpci->tx_rings[queue];
        if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
                ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
                ring->queue_stopped = true;
        }

        return 0;
}

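/* TX completion: compare the hardware read pointer of a queue against
 * our own to see how many skbs completed, then unmap them and report the
 * TX status back to mac80211 (H2C command packets are simply freed).
 */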
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue)
{
        struct ieee80211_hw *hw = rtwdev->hw;
        struct ieee80211_tx_info *info;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb;
        u32 count;
        u32 bd_idx_addr;
        u32 bd_idx, cur_rp;
        u16 q_map;

        ring = &rtwpci->tx_rings[hw_queue];

        bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
        bd_idx = rtw_read32(rtwdev, bd_idx_addr);
        cur_rp = bd_idx >> 16;
        cur_rp &= 0xfff;
        if (cur_rp >= ring->r.rp)
                count = cur_rp - ring->r.rp;
        else
                count = ring->r.len - (ring->r.rp - cur_rp);

        while (count--) {
                skb = skb_dequeue(&ring->queue);
                /* the queue can be empty if hw and driver indexes are
                 * transiently out of sync; do not dereference a NULL skb
                 */
                if (!skb)
                        break;
                tx_data = rtw_pci_get_tx_data(skb);
                pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
                                 PCI_DMA_TODEVICE);

                /* just free command packets from host to card */
                if (hw_queue == RTW_TX_QUEUE_H2C) {
                        dev_kfree_skb_irq(skb);
                        continue;
                }

                if (ring->queue_stopped &&
                    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
                        q_map = skb_get_queue_mapping(skb);
                        ieee80211_wake_queue(hw, q_map);
                        ring->queue_stopped = false;
                }

                skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

                info = IEEE80211_SKB_CB(skb);

                /* enqueue to wait for tx report */
                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
                        rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
                        continue;
                }

                /* always report ACK for the others, so they won't be
                 * marked as dropped
                 */
                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
                else
                        info->flags |= IEEE80211_TX_STAT_ACK;

                ieee80211_tx_info_clear_status(info);
                ieee80211_tx_status_irqsafe(hw, skb);
        }

        ring->r.rp = cur_rp;
}

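/* RX completion: walk the descriptors the hardware has filled, copy each
 * received frame into a freshly allocated skb, then hand the original
 * buffer back to the device. C2H command packets are diverted to the
 * c2h_queue instead of being passed up to mac80211.
 */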
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_rx_ring *ring;
        struct rtw_rx_pkt_stat pkt_stat;
        struct ieee80211_rx_status rx_status;
        struct sk_buff *skb, *new;
        u32 cur_wp, cur_rp, tmp;
        u32 count;
        u32 pkt_offset;
        u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
        u32 buf_desc_sz = chip->rx_buf_desc_sz;
        u32 new_len;
        u8 *rx_desc;
        dma_addr_t dma;

        ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

        tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
        cur_wp = tmp >> 16;
        cur_wp &= 0xfff;
        if (cur_wp >= ring->r.wp)
                count = cur_wp - ring->r.wp;
        else
                count = ring->r.len - (ring->r.wp - cur_wp);

        cur_rp = ring->r.rp;
        while (count--) {
                rtw_pci_dma_check(rtwdev, ring, cur_rp);
                skb = ring->buf[cur_rp];
                dma = *((dma_addr_t *)skb->cb);
                dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
                                        DMA_FROM_DEVICE);
                rx_desc = skb->data;
                chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

                /* offset from rx_desc to payload */
                pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
                             pkt_stat.shift;

                /* allocate a new skb for this frame,
                 * discard the frame if none available
                 */
                new_len = pkt_stat.pkt_len + pkt_offset;
                new = dev_alloc_skb(new_len);
                if (WARN_ONCE(!new, "rx routine starvation\n"))
                        goto next_rp;

                /* copy the DMA data, including the rx_desc, from the phy
                 * into the new skb
                 */
                skb_put_data(new, skb->data, new_len);

                if (pkt_stat.is_c2h) {
                        /* pass rx_desc & offset for further operation */
                        *((u32 *)new->cb) = pkt_offset;
                        skb_queue_tail(&rtwdev->c2h_queue, new);
                        ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
                } else {
                        /* remove rx_desc */
                        skb_pull(new, pkt_offset);

                        rtw_rx_stats(rtwdev, pkt_stat.vif, new);
                        memcpy(new->cb, &rx_status, sizeof(rx_status));
                        ieee80211_rx_irqsafe(rtwdev->hw, new);
                }

next_rp:
                /* new skb delivered to mac80211, re-enable original skb DMA */
                rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
                                            buf_desc_sz);

                /* host reads the next element in the ring */
                if (++cur_rp >= ring->r.len)
                        cur_rp = 0;
        }

        ring->r.rp = cur_rp;
        ring->r.wp = cur_wp;
        rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
                                   struct rtw_pci *rtwpci, u32 *irq_status)
{
        irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
        irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
        irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
        irq_status[0] &= rtwpci->irq_mask[0];
        irq_status[1] &= rtwpci->irq_mask[1];
        irq_status[3] &= rtwpci->irq_mask[3];
        rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
        rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
        rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
        struct rtw_dev *rtwdev = dev;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u32 irq_status[4];

        spin_lock(&rtwpci->irq_lock);
        if (!rtwpci->irq_enabled)
                goto out;

        rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

        if (irq_status[0] & IMR_MGNTDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
        if (irq_status[0] & IMR_HIGHDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
        if (irq_status[0] & IMR_BEDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
        if (irq_status[0] & IMR_BKDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
        if (irq_status[0] & IMR_VODOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
        if (irq_status[0] & IMR_VIDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
        if (irq_status[3] & IMR_H2CDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
        if (irq_status[0] & IMR_ROK)
                rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

out:
        spin_unlock(&rtwpci->irq_lock);

        return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
                              struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long len;
        u8 bar_id = 2;
        int ret;

        ret = pci_request_regions(pdev, KBUILD_MODNAME);
        if (ret) {
                rtw_err(rtwdev, "failed to request pci regions\n");
                return ret;
        }

        len = pci_resource_len(pdev, bar_id);
        rtwpci->mmap = pci_iomap(pdev, bar_id, len);
        if (!rtwpci->mmap) {
                /* release the regions requested above, or they leak */
                pci_release_regions(pdev);
                rtw_err(rtwdev, "failed to map pci memory\n");
                return -ENOMEM;
        }

        return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
                                 struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (rtwpci->mmap) {
                pci_iounmap(pdev, rtwpci->mmap);
                pci_release_regions(pdev);
        }
}

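/* Write one byte through the DBI interface, then poll the flag register
 * (up to 20 * 10us) until the hardware has consumed the write.
 */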
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
        u16 write_addr;
        u16 remainder = addr & 0x3;
        u8 flag;
        u8 cnt = 20;

        write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
        rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
        rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);

        flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
        while (flag && (cnt != 0)) {
                udelay(10);
                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
                cnt--;
        }

        WARN(flag, "DBI write fail\n");
}

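/* Write a PCIe PHY register over MDIO. Registers appear to be split into
 * pages of 0x20, with gen1 (g1) and gen2 parameters on separate pages;
 * completion is polled via BIT_MDIO_WFLAG_V1.
 */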
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
        u8 page;
        u8 wflag;
        u8 cnt;

        rtw_write16(rtwdev, REG_MDIO_V1, data);

        page = addr < 0x20 ? 0 : 1;
        page += g1 ? 0 : 2;
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);

        rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
        wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);

        cnt = 20;
        while (wflag && (cnt != 0)) {
                udelay(10);
                wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
                                        BIT_MDIO_WFLAG_V1);
                cnt--;
        }

        WARN(wflag, "MDIO write fail\n");
}

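/* Apply the chip's PCIe PHY parameter tables for the current cut
 * version, choosing MDIO (PHY) or DBI writes per entry; an offset of
 * 0xffff terminates a table.
 */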
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_intf_phy_para *para;
        u16 cut;
        u16 value;
        u16 offset;
        u16 ip_sel;
        int i;

        cut = BIT(0) << rtwdev->hal.cut_version;

        for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
                para = &chip->intf_table->gen1_para[i];
                if (!(para->cut_mask & cut))
                        continue;
                if (para->offset == 0xffff)
                        break;
                offset = para->offset;
                value = para->value;
                ip_sel = para->ip_sel;
                if (para->ip_sel == RTW_IP_SEL_PHY)
                        rtw_mdio_write(rtwdev, offset, value, true);
                else
                        rtw_dbi_write8(rtwdev, offset, value);
        }

        for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
                para = &chip->intf_table->gen2_para[i];
                if (!(para->cut_mask & cut))
                        continue;
                if (para->offset == 0xffff)
                        break;
                offset = para->offset;
                value = para->value;
                ip_sel = para->ip_sel;
                if (para->ip_sel == RTW_IP_SEL_PHY)
                        rtw_mdio_write(rtwdev, offset, value, false);
                else
                        rtw_dbi_write8(rtwdev, offset, value);
        }
}

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to enable pci device\n");
                return ret;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, rtwdev->hw);
        SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

        return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        pci_clear_master(pdev);
        pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci;
        int ret;

        rtwpci = (struct rtw_pci *)rtwdev->priv;
        rtwpci->pdev = pdev;

        /* after this, the driver can access hw registers */
        ret = rtw_pci_io_mapping(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to request pci io region\n");
                goto err_out;
        }

        ret = rtw_pci_init(rtwdev);
        if (ret) {
                rtw_err(rtwdev, "failed to allocate pci resources\n");
                goto err_io_unmap;
        }

        rtw_pci_phy_cfg(rtwdev);

        return 0;

err_io_unmap:
        rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
        return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        rtw_pci_deinit(rtwdev);
        rtw_pci_io_unmapping(rtwdev, pdev);
}

static struct rtw_hci_ops rtw_pci_ops = {
        .tx = rtw_pci_tx,
        .setup = rtw_pci_setup,
        .start = rtw_pci_start,
        .stop = rtw_pci_stop,

        .read8 = rtw_pci_read8,
        .read16 = rtw_pci_read16,
        .read32 = rtw_pci_read32,
        .write8 = rtw_pci_write8,
        .write16 = rtw_pci_write16,
        .write32 = rtw_pci_write32,
        .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
        .write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        struct ieee80211_hw *hw;
        struct rtw_dev *rtwdev;
        int drv_data_size;
        int ret;

        drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
        hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
        if (!hw) {
                dev_err(&pdev->dev, "failed to allocate hw\n");
                return -ENOMEM;
        }

        rtwdev = hw->priv;
        rtwdev->hw = hw;
        rtwdev->dev = &pdev->dev;
        rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
        rtwdev->hci.ops = &rtw_pci_ops;
        rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

        ret = rtw_core_init(rtwdev);
        if (ret)
                goto err_release_hw;

        rtw_dbg(rtwdev, RTW_DBG_PCI,
                "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
                pdev->vendor, pdev->device, pdev->revision);

        ret = rtw_pci_claim(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to claim pci device\n");
                goto err_deinit_core;
        }

        ret = rtw_pci_setup_resource(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to setup pci resources\n");
                goto err_pci_declaim;
        }

        ret = rtw_chip_info_setup(rtwdev);
        if (ret) {
                rtw_err(rtwdev, "failed to setup chip information\n");
                goto err_destroy_pci;
        }

        ret = rtw_register_hw(rtwdev, hw);
        if (ret) {
                rtw_err(rtwdev, "failed to register hw\n");
                goto err_destroy_pci;
        }

        ret = request_irq(pdev->irq, &rtw_pci_interrupt_handler,
                          IRQF_SHARED, KBUILD_MODNAME, rtwdev);
        if (ret) {
                ieee80211_unregister_hw(hw);
                goto err_destroy_pci;
        }

        return 0;

err_destroy_pci:
        rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
        rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
        rtw_core_deinit(rtwdev);

err_release_hw:
        ieee80211_free_hw(hw);

        return ret;
}

static void rtw_pci_remove(struct pci_dev *pdev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct rtw_dev *rtwdev;
        struct rtw_pci *rtwpci;

        if (!hw)
                return;

        rtwdev = hw->priv;
        rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtw_unregister_hw(rtwdev, hw);
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
        rtw_pci_destroy(rtwdev, pdev);
        rtw_pci_declaim(rtwdev, pdev);
        free_irq(rtwpci->pdev->irq, rtwdev);
        rtw_core_deinit(rtwdev);
        ieee80211_free_hw(hw);
}

static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
        { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
        { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
        {},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);

static struct pci_driver rtw_pci_driver = {
        .name = "rtw_pci",
        .id_table = rtw_pci_id_table,
        .probe = rtw_pci_probe,
        .remove = rtw_pci_remove,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");