// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "debug.h"

static bool rtw_disable_msi;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");

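/* TX buffer descriptor index registers, indexed by RTW_TX_QUEUE_*. Used to
 * kick the hardware write pointer on transmit and to read the hardware read
 * pointer back in rtw_pci_tx_isr().
 */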
static u32 rtw_pci_tx_queue_idx_addr[] = {
        [RTW_TX_QUEUE_BK]       = RTK_PCI_TXBD_IDX_BKQ,
        [RTW_TX_QUEUE_BE]       = RTK_PCI_TXBD_IDX_BEQ,
        [RTW_TX_QUEUE_VI]       = RTK_PCI_TXBD_IDX_VIQ,
        [RTW_TX_QUEUE_VO]       = RTK_PCI_TXBD_IDX_VOQ,
        [RTW_TX_QUEUE_MGMT]     = RTK_PCI_TXBD_IDX_MGMTQ,
        [RTW_TX_QUEUE_HI0]      = RTK_PCI_TXBD_IDX_HI0Q,
        [RTW_TX_QUEUE_H2C]      = RTK_PCI_TXBD_IDX_H2CQ,
};

static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
        switch (queue) {
        case RTW_TX_QUEUE_BCN:
                return TX_DESC_QSEL_BEACON;
        case RTW_TX_QUEUE_H2C:
                return TX_DESC_QSEL_H2C;
        case RTW_TX_QUEUE_MGMT:
                return TX_DESC_QSEL_MGMT;
        case RTW_TX_QUEUE_HI0:
                return TX_DESC_QSEL_HIGH;
        default:
                return skb->priority;
        }
}

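/* MMIO accessors for the BAR mapped in rtw_pci_io_mapping(); all register
 * access from the upper layers goes through these rtw_hci_ops callbacks.
 */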
static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writel(val, rtwpci->mmap + addr);
}

static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
        int offset = tx_ring->r.desc_size * idx;

        return tx_ring->r.head + offset;
}

static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
                                      struct rtw_pci_tx_ring *tx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb, *tmp;
        dma_addr_t dma;

        /* free every skb remaining in the tx queue */
        skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
                __skb_unlink(skb, &tx_ring->queue);
                tx_data = rtw_pci_get_tx_data(skb);
                dma = tx_data->dma;

                pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_tx_ring *tx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = tx_ring->r.head;
        u32 len = tx_ring->r.len;
        int ring_sz = len * tx_ring->r.desc_size;

        rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

        /* free the ring itself */
        pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
        tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
                                      struct rtw_pci_rx_ring *rx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        dma_addr_t dma;
        int i;

        for (i = 0; i < rx_ring->r.len; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;

                dma = *((dma_addr_t *)skb->cb);
                pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
                dev_kfree_skb(skb);
                rx_ring->buf[i] = NULL;
        }
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_rx_ring *rx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = rx_ring->r.head;
        int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

        rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

        pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        int i;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
                rx_ring = &rtwpci->rx_rings[i];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }
}

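/* Allocate one TX ring: a coherent buffer of @len descriptors of @desc_size
 * bytes each, plus an skb queue for the frames currently owned by hardware.
 */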
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_tx_ring *tx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        int ring_sz = desc_size * len;
        dma_addr_t dma;
        u8 *head;

        head = pci_zalloc_consistent(pdev, ring_sz, &dma);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate tx ring\n");
                return -ENOMEM;
        }

        skb_queue_head_init(&tx_ring->queue);
        tx_ring->r.head = head;
        tx_ring->r.dma = dma;
        tx_ring->r.len = len;
        tx_ring->r.desc_size = desc_size;
        tx_ring->r.wp = 0;
        tx_ring->r.rp = 0;

        return 0;
}

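/* Map an skb's data buffer for device DMA and attach it to the RX buffer
 * descriptor at @idx; the DMA address is stashed in skb->cb so that it can
 * be unmapped later.
 */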
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
                                 struct rtw_pci_rx_ring *rx_ring,
                                 u32 idx, u32 desc_sz)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        dma_addr_t dma;

        if (!skb)
                return -EINVAL;

        dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, dma))
                return -EBUSY;

        *((dma_addr_t *)skb->cb) = dma;
        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);

        return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
                                        struct rtw_pci_rx_ring *rx_ring,
                                        u32 idx, u32 desc_sz)
{
        struct device *dev = rtwdev->dev;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;

        dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);
}

static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_rx_ring *rx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb = NULL;
        dma_addr_t dma;
        u8 *head;
        int ring_sz = desc_size * len;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        int i, allocated;
        int ret = 0;

        head = pci_zalloc_consistent(pdev, ring_sz, &dma);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate rx ring\n");
                return -ENOMEM;
        }
        rx_ring->r.head = head;

        for (i = 0; i < len; i++) {
                skb = dev_alloc_skb(buf_sz);
                if (!skb) {
                        allocated = i;
                        ret = -ENOMEM;
                        goto err_out;
                }

                memset(skb->data, 0, buf_sz);
                rx_ring->buf[i] = skb;
                ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
                if (ret) {
                        allocated = i;
                        dev_kfree_skb_any(skb);
                        goto err_out;
                }
        }

        rx_ring->r.dma = dma;
        rx_ring->r.len = len;
        rx_ring->r.desc_size = desc_size;
        rx_ring->r.wp = 0;
        rx_ring->r.rp = 0;

        return 0;

err_out:
        for (i = 0; i < allocated; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;
                dma = *((dma_addr_t *)skb->cb);
                pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                rx_ring->buf[i] = NULL;
        }
        pci_free_consistent(pdev, ring_sz, head, dma);

        rtw_err(rtwdev, "failed to init rx buffer\n");

        return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        struct rtw_chip_info *chip = rtwdev->chip;
        int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
        int tx_desc_size, rx_desc_size;
        u32 len;
        int ret;

        tx_desc_size = chip->tx_buf_desc_sz;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                len = max_num_of_tx_queue(i);
                ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
                if (ret)
                        goto out;
        }

        rx_desc_size = chip->rx_buf_desc_sz;

        for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
                                           RTK_MAX_RX_DESC_NUM);
                if (ret)
                        goto out;
        }

        return 0;

out:
        tx_alloced = i;
        for (i = 0; i < tx_alloced; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        rx_alloced = j;
        for (j = 0; j < rx_alloced; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }

        return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
        rtw_pci_free_trx_ring(rtwdev);
}

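/* Prepare the interrupt masks written to HIMR0/1/3 when interrupts are
 * enabled, and allocate all TX/RX descriptor rings.
 */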
static int rtw_pci_init(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        int ret = 0;

        rtwpci->irq_mask[0] = IMR_HIGHDOK |
                              IMR_MGNTDOK |
                              IMR_BKDOK |
                              IMR_BEDOK |
                              IMR_VIDOK |
                              IMR_VODOK |
                              IMR_ROK |
                              IMR_BCNDMAINT_E |
                              0;
        rtwpci->irq_mask[1] = IMR_TXFOVW |
                              0;
        rtwpci->irq_mask[3] = IMR_H2CDOK |
                              0;
        spin_lock_init(&rtwpci->irq_lock);
        ret = rtw_pci_init_trx_ring(rtwdev);

        return ret;
}

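/* Program each ring's base address (DESA) and size (NUM) registers, then
 * clear every hardware and software read/write pointer.
 */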
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u32 len;
        u8 tmp;
        dma_addr_t dma;

        tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
        rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

        len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
        dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
        rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

        /* reset read/write point */
        rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

        /* reset H2C Queue index */
        rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
        rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
        rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
                                     struct rtw_pci *rtwpci)
{
        rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
        rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
        rtwpci->irq_enabled = true;
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
                                      struct rtw_pci *rtwpci)
{
        rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
        rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
        rtwpci->irq_enabled = false;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
        rtw_pci_reset_trx_ring(rtwdev);

        return 0;
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
        /* reset dma and rx tag */
        rtw_write32_set(rtwdev, RTK_PCI_CTRL,
                        BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
        rtwpci->rx_tag = 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
        struct rtw_pci_tx_ring *tx_ring;
        u8 queue;

        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
                tx_ring = &rtwpci->tx_rings[queue];
                rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
        }
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long flags;

        rtw_pci_dma_reset(rtwdev, rtwpci);

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw_pci_enable_interrupt(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

        return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
        rtw_pci_dma_release(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

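/* map mac80211 access categories to hardware TX queues */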
static u8 ac_to_hwq[] = {
        [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
        [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
        [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
        [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        __le16 fc = hdr->frame_control;
        u8 q_mapping = skb_get_queue_mapping(skb);
        u8 queue;

        if (unlikely(ieee80211_is_beacon(fc)))
                queue = RTW_TX_QUEUE_BCN;
        else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
                queue = RTW_TX_QUEUE_MGMT;
        else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
                queue = ac_to_hwq[IEEE80211_AC_BE];
        else
                queue = ac_to_hwq[q_mapping];

        return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
                                      struct rtw_pci_tx_ring *ring)
{
        struct sk_buff *prev = skb_dequeue(&ring->queue);
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;

        if (!prev)
                return;

        tx_data = rtw_pci_get_tx_data(prev);
        dma = tx_data->dma;
        pci_unmap_single(rtwpci->pdev, dma, prev->len,
                         PCI_DMA_TODEVICE);
        dev_kfree_skb_any(prev);
}

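/* With BIT_RX_TAG_EN set in rtw_pci_dma_reset(), each RX buffer descriptor
 * is expected to carry an incrementing tag in its total_pkt_size field; a
 * mismatch with the driver's counter is reported as a possible bus timeout.
 */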
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
                              struct rtw_pci_rx_ring *rx_ring,
                              u32 idx)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        u32 desc_sz = chip->rx_buf_desc_sz;
        u16 total_pkt_size;

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

        /* rx tag mismatch, throw a warning */
        if (total_pkt_size != rtwpci->rx_tag)
                rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

        rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

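/* Fill a pair of TX buffer descriptor segments for the frame: segment 0
 * covers the TX packet descriptor pushed onto the head of the skb, segment 1
 * the payload behind it. Beacon frames set the OWN bit and are kicked via
 * the BCN work register; other queues advance the ring write pointer.
 */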
static int rtw_pci_xmit(struct rtw_dev *rtwdev,
                        struct rtw_tx_pkt_info *pkt_info,
                        struct sk_buff *skb, u8 queue)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;
        u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
        u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
        u32 size;
        u32 psb_len;
        u8 *pkt_desc;
        struct rtw_pci_tx_buffer_desc *buf_desc;
        u32 bd_idx;

        ring = &rtwpci->tx_rings[queue];

        size = skb->len;

        if (queue == RTW_TX_QUEUE_BCN)
                rtw_pci_release_rsvd_page(rtwpci, ring);
        else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
                return -ENOSPC;

        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
        memset(pkt_desc, 0, tx_pkt_desc_sz);
        pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
        rtw_tx_fill_tx_desc(pkt_info, skb);
        dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
                             PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(rtwpci->pdev, dma))
                return -EBUSY;

        /* after this point the DMA is mapped; there is no way back */
        buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
        memset(buf_desc, 0, tx_buf_desc_sz);
        psb_len = (skb->len - 1) / 128 + 1;
        if (queue == RTW_TX_QUEUE_BCN)
                psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

        buf_desc[0].psb_len = cpu_to_le16(psb_len);
        buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
        buf_desc[0].dma = cpu_to_le32(dma);
        buf_desc[1].buf_size = cpu_to_le16(size);
        buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

        tx_data = rtw_pci_get_tx_data(skb);
        tx_data->dma = dma;
        tx_data->sn = pkt_info->sn;
        skb_queue_tail(&ring->queue, skb);

        /* kick off tx queue */
        if (queue != RTW_TX_QUEUE_BCN) {
                if (++ring->r.wp >= ring->r.len)
                        ring->r.wp = 0;
                bd_idx = rtw_pci_tx_queue_idx_addr[queue];
                rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
        } else {
                u32 reg_bcn_work;

                reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
                reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
                rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
        }

        return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
                                        u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info;
        u32 tx_pkt_desc_sz;
        u32 length;

        tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
        length = size + tx_pkt_desc_sz;
        skb = dev_alloc_skb(length);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, tx_pkt_desc_sz);
        memcpy((u8 *)skb_put(skb, size), buf, size);
        memset(&pkt_info, 0, sizeof(pkt_info));
        pkt_info.tx_pkt_size = size;
        pkt_info.offset = tx_pkt_desc_sz;

        return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info;
        u32 tx_pkt_desc_sz;
        u32 length;

        tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
        length = size + tx_pkt_desc_sz;
        skb = dev_alloc_skb(length);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, tx_pkt_desc_sz);
        memcpy((u8 *)skb_put(skb, size), buf, size);
        memset(&pkt_info, 0, sizeof(pkt_info));
        pkt_info.tx_pkt_size = size;

        return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}

static int rtw_pci_tx(struct rtw_dev *rtwdev,
                      struct rtw_tx_pkt_info *pkt_info,
                      struct sk_buff *skb)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring;
        u8 queue = rtw_hw_queue_mapping(skb);
        int ret;

        ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
        if (ret)
                return ret;

        ring = &rtwpci->tx_rings[queue];
        if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
                ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
                ring->queue_stopped = true;
        }

        return 0;
}

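/* TX completion: read the hardware read pointer from the queue's BD index
 * register, then unmap and complete every frame between the software and
 * hardware read pointers, accounting for ring wraparound. The mac80211
 * queue is woken once enough descriptors are free again.
 */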
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue)
{
        struct ieee80211_hw *hw = rtwdev->hw;
        struct ieee80211_tx_info *info;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb;
        u32 count;
        u32 bd_idx_addr;
        u32 bd_idx, cur_rp;
        u16 q_map;

        ring = &rtwpci->tx_rings[hw_queue];

        bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
        bd_idx = rtw_read32(rtwdev, bd_idx_addr);
        cur_rp = bd_idx >> 16;
        cur_rp &= 0xfff;
        if (cur_rp >= ring->r.rp)
                count = cur_rp - ring->r.rp;
        else
                count = ring->r.len - (ring->r.rp - cur_rp);

        while (count--) {
                skb = skb_dequeue(&ring->queue);
                tx_data = rtw_pci_get_tx_data(skb);
                pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
                                 PCI_DMA_TODEVICE);

                /* H2C (host to card) command packets are simply freed */
                if (hw_queue == RTW_TX_QUEUE_H2C) {
                        dev_kfree_skb_irq(skb);
                        continue;
                }

                if (ring->queue_stopped &&
                    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
                        q_map = skb_get_queue_mapping(skb);
                        ieee80211_wake_queue(hw, q_map);
                        ring->queue_stopped = false;
                }

                skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

                info = IEEE80211_SKB_CB(skb);

                /* enqueue to wait for tx report */
                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
                        rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
                        continue;
                }

                /* report ACK for all others so they are not marked as dropped */
                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
                else
                        info->flags |= IEEE80211_TX_STAT_ACK;

                ieee80211_tx_info_clear_status(info);
                ieee80211_tx_status_irqsafe(hw, skb);
        }

        ring->r.rp = cur_rp;
}

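/* RX completion: for each buffer between the software and hardware write
 * pointers, copy the frame (with its rx_desc) into a freshly allocated skb
 * so the ring buffer can be rearmed immediately, then hand C2H packets to
 * the firmware layer and data frames to mac80211.
 */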
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_rx_ring *ring;
        struct rtw_rx_pkt_stat pkt_stat;
        struct ieee80211_rx_status rx_status;
        struct sk_buff *skb, *new;
        u32 cur_wp, cur_rp, tmp;
        u32 count;
        u32 pkt_offset;
        u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
        u32 buf_desc_sz = chip->rx_buf_desc_sz;
        u32 new_len;
        u8 *rx_desc;
        dma_addr_t dma;

        ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

        tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
        cur_wp = tmp >> 16;
        cur_wp &= 0xfff;
        if (cur_wp >= ring->r.wp)
                count = cur_wp - ring->r.wp;
        else
                count = ring->r.len - (ring->r.wp - cur_wp);

        cur_rp = ring->r.rp;
        while (count--) {
                rtw_pci_dma_check(rtwdev, ring, cur_rp);
                skb = ring->buf[cur_rp];
                dma = *((dma_addr_t *)skb->cb);
                dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
                                        DMA_FROM_DEVICE);
                rx_desc = skb->data;
                chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

                /* offset from rx_desc to payload */
                pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
                             pkt_stat.shift;

                /* allocate a new skb for this frame,
                 * discard the frame if none is available
                 */
                new_len = pkt_stat.pkt_len + pkt_offset;
                new = dev_alloc_skb(new_len);
                if (WARN_ONCE(!new, "rx routine starvation\n"))
                        goto next_rp;

                /* copy the DMA'd data, including the rx_desc, into the new skb */
                skb_put_data(new, skb->data, new_len);

                if (pkt_stat.is_c2h) {
                        rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
                } else {
                        /* remove rx_desc */
                        skb_pull(new, pkt_offset);

                        rtw_rx_stats(rtwdev, pkt_stat.vif, new);
                        memcpy(new->cb, &rx_status, sizeof(rx_status));
                        ieee80211_rx_irqsafe(rtwdev->hw, new);
                }

next_rp:
                /* the new skb was delivered to mac80211; re-enable DMA on the
                 * original skb
                 */
                rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
                                            buf_desc_sz);

                /* the host reads the next element in the ring */
                if (++cur_rp >= ring->r.len)
                        cur_rp = 0;
        }

        ring->r.rp = cur_rp;
        ring->r.wp = cur_wp;
        rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
                                   struct rtw_pci *rtwpci, u32 *irq_status)
{
        irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
        irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
        irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
        irq_status[0] &= rtwpci->irq_mask[0];
        irq_status[1] &= rtwpci->irq_mask[1];
        irq_status[3] &= rtwpci->irq_mask[3];
        rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
        rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
        rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
        struct rtw_dev *rtwdev = dev;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        spin_lock(&rtwpci->irq_lock);
        if (!rtwpci->irq_enabled)
                goto out;

        /* disable RTW PCI interrupt to avoid more interrupts before the end of
         * thread function
         *
         * disable HIMR here to also avoid new HISR flag being raised before
         * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
         * are cleared, the edge-triggered interrupt will not be generated when
         * a new HISR flag is set.
         */
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
out:
        spin_unlock(&rtwpci->irq_lock);

        return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
        struct rtw_dev *rtwdev = dev;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long flags;
        u32 irq_status[4];

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

        if (irq_status[0] & IMR_MGNTDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
        if (irq_status[0] & IMR_HIGHDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
        if (irq_status[0] & IMR_BEDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
        if (irq_status[0] & IMR_BKDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
        if (irq_status[0] & IMR_VODOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
        if (irq_status[0] & IMR_VIDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
        if (irq_status[3] & IMR_H2CDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
        if (irq_status[0] & IMR_ROK)
                rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

        /* all of the jobs for this interrupt have been done */
        rtw_pci_enable_interrupt(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

        return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
                              struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long len;
        u8 bar_id = 2;
        int ret;

        ret = pci_request_regions(pdev, KBUILD_MODNAME);
        if (ret) {
                rtw_err(rtwdev, "failed to request pci regions\n");
                return ret;
        }

        len = pci_resource_len(pdev, bar_id);
        rtwpci->mmap = pci_iomap(pdev, bar_id, len);
        if (!rtwpci->mmap) {
                rtw_err(rtwdev, "failed to map pci memory\n");
                return -ENOMEM;
        }

        return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
                                 struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (rtwpci->mmap) {
                pci_iounmap(pdev, rtwpci->mmap);
                pci_release_regions(pdev);
        }
}

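/* Write one byte into DBI space: stage the data byte, select the target
 * byte lane via bits 12-15 of the address register, trigger the write, and
 * poll the completion flag for up to 200us (20 * 10us).
 */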
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
        u16 write_addr;
        u16 remainder = addr & 0x3;
        u8 flag;
        u8 cnt = 20;

        write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
        rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
        rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);

        flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
        while (flag && (cnt != 0)) {
                udelay(10);
                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
                cnt--;
        }

        WARN(flag, "DBI write fail\n");
}

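/* Write one MDIO register of the PCIe PHY: stage the data, select the
 * register page (registers above 0x1f use page 1; gen2 tables use pages
 * 2/3), trigger the write, and poll the write flag for up to 200us.
 */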
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
        u8 page;
        u8 wflag;
        u8 cnt;

        rtw_write16(rtwdev, REG_MDIO_V1, data);

        page = addr < 0x20 ? 0 : 1;
        page += g1 ? 0 : 2;
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);

        rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
        wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);

        cnt = 20;
        while (wflag && (cnt != 0)) {
                udelay(10);
                wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
                                        BIT_MDIO_WFLAG_V1);
                cnt--;
        }

        WARN(wflag, "MDIO write fail\n");
}

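/* Apply the chip's interface PHY parameter tables (gen1 and gen2), skipping
 * entries whose cut_mask does not match the current chip cut; PHY registers
 * are written via MDIO, everything else via DBI.
 */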
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_intf_phy_para *para;
        u16 cut;
        u16 value;
        u16 offset;
        int i;

        cut = BIT(0) << rtwdev->hal.cut_version;

        for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
                para = &chip->intf_table->gen1_para[i];
                if (!(para->cut_mask & cut))
                        continue;
                if (para->offset == 0xffff)
                        break;
                offset = para->offset;
                value = para->value;
                if (para->ip_sel == RTW_IP_SEL_PHY)
                        rtw_mdio_write(rtwdev, offset, value, true);
                else
                        rtw_dbi_write8(rtwdev, offset, value);
        }

        for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
                para = &chip->intf_table->gen2_para[i];
                if (!(para->cut_mask & cut))
                        continue;
                if (para->offset == 0xffff)
                        break;
                offset = para->offset;
                value = para->value;
                if (para->ip_sel == RTW_IP_SEL_PHY)
                        rtw_mdio_write(rtwdev, offset, value, false);
                else
                        rtw_dbi_write8(rtwdev, offset, value);
        }
}

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to enable pci device\n");
                return ret;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, rtwdev->hw);
        SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

        return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        pci_clear_master(pdev);
        pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci;
        int ret;

        rtwpci = (struct rtw_pci *)rtwdev->priv;
        rtwpci->pdev = pdev;

        /* after this, the driver can access the hw registers */
        ret = rtw_pci_io_mapping(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to request pci io region\n");
                goto err_out;
        }

        ret = rtw_pci_init(rtwdev);
        if (ret) {
                rtw_err(rtwdev, "failed to allocate pci resources\n");
                goto err_io_unmap;
        }

        rtw_pci_phy_cfg(rtwdev);

        return 0;

err_io_unmap:
        rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
        return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        rtw_pci_deinit(rtwdev);
        rtw_pci_io_unmapping(rtwdev, pdev);
}

static struct rtw_hci_ops rtw_pci_ops = {
        .tx = rtw_pci_tx,
        .setup = rtw_pci_setup,
        .start = rtw_pci_start,
        .stop = rtw_pci_stop,

        .read8 = rtw_pci_read8,
        .read16 = rtw_pci_read16,
        .read32 = rtw_pci_read32,
        .write8 = rtw_pci_write8,
        .write16 = rtw_pci_write16,
        .write32 = rtw_pci_write32,
        .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
        .write_data_h2c = rtw_pci_write_data_h2c,
};

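/* Allocate a single interrupt vector, preferring MSI unless it is disabled
 * via the disable_msi module parameter (falling back to legacy INTx), and
 * install the threaded interrupt handler.
 */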
static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        unsigned int flags = PCI_IRQ_LEGACY;
        int ret;

        if (!rtw_disable_msi)
                flags |= PCI_IRQ_MSI;

        ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
        if (ret < 0) {
                rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
                return ret;
        }

        ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
                                        rtw_pci_interrupt_handler,
                                        rtw_pci_interrupt_threadfn,
                                        IRQF_SHARED, KBUILD_MODNAME, rtwdev);
        if (ret) {
                rtw_err(rtwdev, "failed to request irq %d\n", ret);
                pci_free_irq_vectors(pdev);
        }

        return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
        pci_free_irq_vectors(pdev);
}

static int rtw_pci_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        struct ieee80211_hw *hw;
        struct rtw_dev *rtwdev;
        int drv_data_size;
        int ret;

        drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
        hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
        if (!hw) {
                dev_err(&pdev->dev, "failed to allocate hw\n");
                return -ENOMEM;
        }

        rtwdev = hw->priv;
        rtwdev->hw = hw;
        rtwdev->dev = &pdev->dev;
        rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
        rtwdev->hci.ops = &rtw_pci_ops;
        rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

        ret = rtw_core_init(rtwdev);
        if (ret)
                goto err_release_hw;

        rtw_dbg(rtwdev, RTW_DBG_PCI,
                "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
                pdev->vendor, pdev->device, pdev->revision);

        ret = rtw_pci_claim(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to claim pci device\n");
                goto err_deinit_core;
        }

        ret = rtw_pci_setup_resource(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to setup pci resources\n");
                goto err_pci_declaim;
        }

        ret = rtw_chip_info_setup(rtwdev);
        if (ret) {
                rtw_err(rtwdev, "failed to setup chip information\n");
                goto err_destroy_pci;
        }

        ret = rtw_register_hw(rtwdev, hw);
        if (ret) {
                rtw_err(rtwdev, "failed to register hw\n");
                goto err_destroy_pci;
        }

        ret = rtw_pci_request_irq(rtwdev, pdev);
        if (ret) {
                ieee80211_unregister_hw(hw);
                goto err_destroy_pci;
        }

        return 0;

err_destroy_pci:
        rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
        rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
        rtw_core_deinit(rtwdev);

err_release_hw:
        ieee80211_free_hw(hw);

        return ret;
}

static void rtw_pci_remove(struct pci_dev *pdev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct rtw_dev *rtwdev;
        struct rtw_pci *rtwpci;

        if (!hw)
                return;

        rtwdev = hw->priv;
        rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtw_unregister_hw(rtwdev, hw);
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
        rtw_pci_destroy(rtwdev, pdev);
        rtw_pci_declaim(rtwdev, pdev);
        rtw_pci_free_irq(rtwdev, pdev);
        rtw_core_deinit(rtwdev);
        ieee80211_free_hw(hw);
}

static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
        { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
        { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
        {},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);

static struct pci_driver rtw_pci_driver = {
        .name = "rtw_pci",
        .id_table = rtw_pci_id_table,
        .probe = rtw_pci_probe,
        .remove = rtw_pci_remove,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");