linux/drivers/net/wireless/realtek/rtw89/pci.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020  Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
{
        u32 val;
        int ret;

        rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
                      rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);

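        /* The chip clears B_AX_RST_BDRAM once the BD RAM reset completes;
         * poll for that in 1 us steps until the budget given by
         * RTW89_PCI_POLL_BDRAM_RST_CNT (passed as the timeout, in
         * microseconds) runs out.
         */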
        ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
                                       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
                                       rtwdev, R_AX_PCIE_INIT_CFG1);

        if (ret)
                return -EBUSY;

        return 0;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
                                struct rtw89_pci_dma_ring *bd_ring,
                                u32 cur_idx, bool tx)
{
        u32 cnt, cur_rp, wp, rp, len;

        rp = bd_ring->rp;
        wp = bd_ring->wp;
        len = bd_ring->len;

        cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
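        /* Worked example (hypothetical numbers): with len = 256, rp = 250
         * and cur_rp = 4, the hardware index has wrapped around, so the TX
         * branch reclaims len - (rp - cur_rp) = 256 - 246 = 10 descriptors.
         */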
        if (tx)
                cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
        else
                cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);

        bd_ring->rp = cur_rp;

        return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
                                 struct rtw89_pci_tx_ring *tx_ring)
{
        struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
        u32 addr_idx = bd_ring->addr.idx;
        u32 cnt, idx;

        idx = rtw89_read32(rtwdev, addr_idx);
        cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

        return cnt;
}

static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
                                    struct rtw89_pci *rtwpci,
                                    u32 cnt, bool release_all)
{
        struct rtw89_pci_tx_data *tx_data;
        struct sk_buff *skb;
        u32 qlen;

        while (cnt--) {
                skb = skb_dequeue(&rtwpci->h2c_queue);
                if (!skb) {
                        rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
                        return;
                }
                skb_queue_tail(&rtwpci->h2c_release_queue, skb);
        }

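        /* Unless draining everything, hold back the newest
         * RTW89_PCI_MULTITAG entries, apparently because the hardware may
         * still be fetching the most recent commands after the BD index
         * moves; only the older fwcmd skbs are unmapped and freed here (our
         * reading of this pre-release scheme).
         */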
        qlen = skb_queue_len(&rtwpci->h2c_release_queue);
        if (!release_all)
                qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;

        while (qlen--) {
                skb = skb_dequeue(&rtwpci->h2c_release_queue);
                if (!skb) {
                        rtw89_err(rtwdev, "failed to release fwcmd\n");
                        return;
                }
                tx_data = RTW89_PCI_TX_SKB_CB(skb);
                dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
                                       struct rtw89_pci *rtwpci)
{
        struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
        u32 cnt;

        cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
        if (!cnt)
                return;
        rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
                                 struct rtw89_pci_rx_ring *rx_ring)
{
        struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
        u32 addr_idx = bd_ring->addr.idx;
        u32 cnt, idx;

        idx = rtw89_read32(rtwdev, addr_idx);
        cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

        return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
                                       struct sk_buff *skb)
{
        struct rtw89_pci_rx_info *rx_info;
        dma_addr_t dma;

        rx_info = RTW89_PCI_RX_SKB_CB(skb);
        dma = rx_info->dma;
        dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
                                DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
                                          struct sk_buff *skb)
{
        struct rtw89_pci_rx_info *rx_info;
        dma_addr_t dma;

        rx_info = RTW89_PCI_RX_SKB_CB(skb);
        dma = rx_info->dma;
        dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
                                   DMA_FROM_DEVICE);
}

static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
                                      struct sk_buff *skb)
{
        struct rtw89_pci_rxbd_info *rxbd_info;
        struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);

        rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
        rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
        rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
        rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
        rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);

        return 0;
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
                      struct sk_buff *new,
                      const struct sk_buff *skb, u32 offset,
                      const struct rtw89_pci_rx_info *rx_info,
                      const struct rtw89_rx_desc_info *desc_info)
{
        u32 copy_len = rx_info->len - offset;

        if (unlikely(skb_tailroom(new) < copy_len)) {
                rtw89_debug(rtwdev, RTW89_DBG_TXRX,
                            "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
                            rx_info->len, desc_info->pkt_size, offset, fs, ls);
                rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
                               skb->data, rx_info->len);
                /* length of a single segment skb is desc_info->pkt_size */
                if (fs && ls) {
                        copy_len = desc_info->pkt_size;
                } else {
                        rtw89_info(rtwdev, "drop rx data due to invalid length\n");
                        return false;
                }
        }

        skb_put_data(new, skb->data + offset, copy_len);

        return true;
}

static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
                                       struct rtw89_pci_rx_ring *rx_ring)
{
        struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
        struct rtw89_pci_rx_info *rx_info;
        struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
        struct sk_buff *new = rx_ring->diliver_skb;
        struct sk_buff *skb;
        u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
        u32 offset;
        u32 cnt = 1;
        bool fs, ls;
        int ret;

        skb = rx_ring->buf[bd_ring->wp];
        rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

        ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
        if (ret) {
                rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
                          bd_ring->wp, ret);
                goto err_sync_device;
        }

        rx_info = RTW89_PCI_RX_SKB_CB(skb);
        fs = rx_info->fs;
        ls = rx_info->ls;

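        /* A frame can span several RX buffers: FS marks the segment that
         * carries the RX descriptor, LS the final one. Each segment's
         * payload is copied into 'new' until LS completes the frame.
         */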
        if (fs) {
                if (new) {
                        rtw89_err(rtwdev, "skb should not be ready before first segment start\n");
                        goto err_sync_device;
                }
                if (desc_info->ready) {
                        rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
                        goto err_sync_device;
                }

                rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

                new = dev_alloc_skb(desc_info->pkt_size);
                if (!new)
                        goto err_sync_device;

                rx_ring->diliver_skb = new;

                /* first segment has RX desc */
                offset = desc_info->offset;
                offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
                          sizeof(struct rtw89_rxdesc_short);
        } else {
                offset = sizeof(struct rtw89_pci_rxbd_info);
                if (!new) {
                        rtw89_warn(rtwdev, "no last skb\n");
                        goto err_sync_device;
                }
        }
        if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
                goto err_sync_device;
        rtw89_pci_sync_skb_for_device(rtwdev, skb);
        rtw89_pci_rxbd_increase(rx_ring, 1);

        if (!desc_info->ready) {
                rtw89_warn(rtwdev, "no rx desc information\n");
                goto err_free_resource;
        }
        if (ls) {
                rtw89_core_rx(rtwdev, desc_info, new);
                rx_ring->diliver_skb = NULL;
                desc_info->ready = false;
        }

        return cnt;

err_sync_device:
        rtw89_pci_sync_skb_for_device(rtwdev, skb);
        rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
        if (new)
                dev_kfree_skb_any(new);
        rx_ring->diliver_skb = NULL;
        desc_info->ready = false;

        return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
                                   struct rtw89_pci_rx_ring *rx_ring,
                                   u32 cnt)
{
        struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
        u32 rx_cnt;

        while (cnt && rtwdev->napi_budget_countdown > 0) {
                rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
                if (!rx_cnt) {
                        rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

                        /* skip the remaining RXBD bufs */
                        rtw89_pci_rxbd_increase(rx_ring, cnt);
                        break;
                }

                cnt -= rx_cnt;
        }

        rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
                                  struct rtw89_pci *rtwpci, int budget)
{
        struct rtw89_pci_rx_ring *rx_ring;
        int countdown = rtwdev->napi_budget_countdown;
        u32 cnt;

        rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

        cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
        if (!cnt)
                return 0;

        cnt = min_t(u32, budget, cnt);

        rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

        /* When flushing pending SKBs, the countdown may drop below zero. */
        if (rtwdev->napi_budget_countdown <= 0)
                return budget;

        return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
                                struct rtw89_pci_tx_ring *tx_ring,
                                struct sk_buff *skb, u8 tx_status)
{
        struct ieee80211_tx_info *info;

        info = IEEE80211_SKB_CB(skb);
        ieee80211_tx_info_clear_status(info);

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
        if (tx_status == RTW89_TX_DONE) {
                info->flags |= IEEE80211_TX_STAT_ACK;
                tx_ring->tx_acked++;
        } else {
                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
                        rtw89_debug(rtwdev, RTW89_DBG_FW,
                                    "failed to TX, status %x\n", tx_status);
                switch (tx_status) {
                case RTW89_TX_RETRY_LIMIT:
                        tx_ring->tx_retry_lmt++;
                        break;
                case RTW89_TX_LIFE_TIME:
                        tx_ring->tx_life_time++;
                        break;
                case RTW89_TX_MACID_DROP:
                        tx_ring->tx_mac_id_drop++;
                        break;
                default:
                        rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
                        break;
                }
        }

        ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
        struct rtw89_pci_tx_wd *txwd;
        u32 cnt;

        cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
        while (cnt--) {
                txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
                if (!txwd) {
                        rtw89_warn(rtwdev, "No busy txwd pages available\n");
                        break;
                }

                list_del_init(&txwd->list);

                /* this skb has been freed by RPP */
                if (skb_queue_len(&txwd->queue) == 0)
                        rtw89_pci_enqueue_txwd(tx_ring, txwd);
        }
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
                                        struct rtw89_pci_tx_ring *tx_ring)
{
        struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
        struct rtw89_pci_tx_wd *txwd;
        int i;

        for (i = 0; i < wd_ring->page_num; i++) {
                txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
                if (!txwd)
                        break;

                list_del_init(&txwd->list);
        }
}

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
                                       struct rtw89_pci_tx_ring *tx_ring,
                                       struct rtw89_pci_tx_wd *txwd, u16 seq,
                                       u8 tx_status)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_tx_data *tx_data;
        struct sk_buff *skb, *tmp;
        u8 txch = tx_ring->txch;

        if (!list_empty(&txwd->list)) {
                rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
                /* In low power mode, an RPP can arrive before the TX BD is
                 * updated. In normal mode this should not happen, so warn
                 * about it.
                 */
                if (!rtwpci->low_power && !list_empty(&txwd->list))
                        rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
                                   txch, seq);
        }

        skb_queue_walk_safe(&txwd->queue, skb, tmp) {
                skb_unlink(skb, &txwd->queue);

                tx_data = RTW89_PCI_TX_SKB_CB(skb);
                dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
                                 DMA_TO_DEVICE);

                rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
        }

        if (list_empty(&txwd->list))
                rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
                                  struct rtw89_pci_rpp_fmt *rpp)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_tx_ring *tx_ring;
        struct rtw89_pci_tx_wd_ring *wd_ring;
        struct rtw89_pci_tx_wd *txwd;
        u16 seq;
        u8 qsel, tx_status, txch;

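        /* Each RPP entry reports completion of one TXWD page: 'seq' indexes
         * the page in the queue's wd_ring, 'qsel' maps back to the DMA
         * channel, and 'tx_status' says how the frame finished (ACKed,
         * retry limit, lifetime expiry, or MACID drop).
         */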
        seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
        qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
        tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
        txch = rtw89_core_get_ch_dma(rtwdev, qsel);

        if (txch == RTW89_TXCH_CH12) {
                rtw89_warn(rtwdev, "should be no fwcmd release report\n");
                return;
        }

        tx_ring = &rtwpci->tx_rings[txch];
        wd_ring = &tx_ring->wd_ring;
        txwd = &wd_ring->pages[seq];

        rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
                                               struct rtw89_pci_tx_ring *tx_ring)
{
        struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
        struct rtw89_pci_tx_wd *txwd;
        int i;

        for (i = 0; i < wd_ring->page_num; i++) {
                txwd = &wd_ring->pages[i];

                if (!list_empty(&txwd->list))
                        continue;

                rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
        }
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
                                     struct rtw89_pci_rx_ring *rx_ring,
                                     u32 max_cnt)
{
        struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
        struct rtw89_pci_rx_info *rx_info;
        struct rtw89_pci_rpp_fmt *rpp;
        struct rtw89_rx_desc_info desc_info = {};
        struct sk_buff *skb;
        u32 cnt = 0;
        u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
        u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
        u32 offset;
        int ret;

        skb = rx_ring->buf[bd_ring->wp];
        rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

        ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
        if (ret) {
                rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
                          bd_ring->wp, ret);
                goto err_sync_device;
        }

        rx_info = RTW89_PCI_RX_SKB_CB(skb);
        if (!rx_info->fs || !rx_info->ls) {
                rtw89_err(rtwdev, "cannot process RP frame without FS/LS set\n");
                return cnt;
        }

        rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

        /* first segment has RX desc */
        offset = desc_info.offset;
        offset += desc_info.long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
                                          sizeof(struct rtw89_rxdesc_short);
        for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
                rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
                rtw89_pci_release_rpp(rtwdev, rpp);
        }

        rtw89_pci_sync_skb_for_device(rtwdev, skb);
        rtw89_pci_rxbd_increase(rx_ring, 1);
        cnt++;

        return cnt;

err_sync_device:
        rtw89_pci_sync_skb_for_device(rtwdev, skb);
        return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
                                 struct rtw89_pci_rx_ring *rx_ring,
                                 u32 cnt)
{
        struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
        u32 release_cnt;

        while (cnt) {
                release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
                if (!release_cnt) {
                        rtw89_err(rtwdev, "failed to release TX skbs\n");

                        /* skip the remaining RXBD bufs */
                        rtw89_pci_rxbd_increase(rx_ring, cnt);
                        break;
                }

                cnt -= release_cnt;
        }

        rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
                                  struct rtw89_pci *rtwpci, int budget)
{
        struct rtw89_pci_rx_ring *rx_ring;
        u32 cnt;
        int work_done;

        rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

        spin_lock_bh(&rtwpci->trx_lock);

        cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
        if (cnt == 0)
                goto out_unlock;

        rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
        spin_unlock_bh(&rtwpci->trx_lock);

        /* always release all RPQ */
        work_done = min_t(int, cnt, budget);
        rtwdev->napi_budget_countdown -= work_done;

        return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
                                      struct rtw89_pci *rtwpci)
{
        struct rtw89_pci_rx_ring *rx_ring;
        struct rtw89_pci_dma_ring *bd_ring;
        u32 reg_idx;
        u16 hw_idx, hw_idx_next, host_idx;
        int i;

        for (i = 0; i < RTW89_RXCH_NUM; i++) {
                rx_ring = &rtwpci->rx_rings[i];
                bd_ring = &rx_ring->bd_ring;

                reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
                hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
                host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
                hw_idx_next = (hw_idx + 1) % bd_ring->len;

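                /* The ring has no free RX BD left for DMA when advancing
                 * the hardware index by one slot would collide with the
                 * host index.
                 */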
                if (hw_idx_next == host_idx)
                        rtw89_warn(rtwdev, "%d RXD unavailable\n", i);

                rtw89_debug(rtwdev, RTW89_DBG_TXRX,
                            "%d RXD unavailable, idx=0x%08x, len=%d\n",
                            i, reg_idx, bd_ring->len);
        }
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
                               struct rtw89_pci *rtwpci,
                               struct rtw89_pci_isrs *isrs)
{
        isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
        isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
        isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

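        /* The HISR registers are write-1-clear, so writing the captured
         * bits back acks exactly the interrupts recognized above.
         */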
        rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
        rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
        rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
                                  struct rtw89_pci *rtwpci,
                                  struct rtw89_pci_isrs *isrs)
{
        isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
        isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
                              rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
        isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
                        rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
        isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
                        rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

        if (isrs->halt_c2h_isrs)
                rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
        if (isrs->isrs[0])
                rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
        if (isrs->isrs[1])
                rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
        /* write 1 clear */
        rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
        rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
        rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
        rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
        rtw89_write32(rtwdev, R_AX_HIMR0, 0);
        rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
        rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
        rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
        rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
        rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
        rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
        rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw89_chip_disable_intr(rtwdev, rtwpci);
        rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
        rtw89_chip_enable_intr(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw89_chip_disable_intr(rtwdev, rtwpci);
        rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
        rtw89_chip_enable_intr(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        int budget = NAPI_POLL_WEIGHT;

        /* Prevent the RXQ from getting stuck when the budget runs out. */
        rtwdev->napi_budget_countdown = budget;

        rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
        rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

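/* IRQ handling is split in two stages: the hard handler further below masks
 * the chip interrupts and returns IRQ_WAKE_THREAD, and this threaded handler
 * then reads and acks the ISR bits. Low power RX is serviced inline, and the
 * recovery and low power paths re-enable interrupts here directly; the
 * normal path schedules NAPI instead (interrupts presumably get re-enabled
 * when the NAPI poll completes, outside this excerpt).
 */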
static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
        struct rtw89_dev *rtwdev = dev;
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_isrs isrs;
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

        if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
                rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

        if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
                rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

        if (unlikely(rtwpci->under_recovery))
                goto enable_intr;

        if (unlikely(rtwpci->low_power)) {
                rtw89_pci_low_power_interrupt_handler(rtwdev);
                goto enable_intr;
        }

        if (likely(rtwpci->running)) {
                local_bh_disable();
                napi_schedule(&rtwdev->napi);
                local_bh_enable();
        }

        return IRQ_HANDLED;

enable_intr:
        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw89_chip_enable_intr(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
        return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
        struct rtw89_dev *rtwdev = dev;
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        unsigned long flags;
        irqreturn_t irqret = IRQ_WAKE_THREAD;

        spin_lock_irqsave(&rtwpci->irq_lock, flags);

        /* An interrupt event already in flight can still fire even after
         * pci_stop() has turned off the IMR.
         */
        if (unlikely(!rtwpci->running)) {
                irqret = IRQ_HANDLED;
                goto exit;
        }

        rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

        return irqret;
}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
        [RTW89_TXCH_##txch] = { \
                .num = R_AX_##txch##_TXBD_NUM ##v, \
                .idx = R_AX_##txch##_TXBD_IDX ##v, \
                .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
                .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
                .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
        }

#define DEF_TXCHADDRS(info, txch, v...) \
        [RTW89_TXCH_##txch] = { \
                .num = R_AX_##txch##_TXBD_NUM, \
                .idx = R_AX_##txch##_TXBD_IDX, \
                .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
                .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
                .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
        }

#define DEF_RXCHADDRS(info, rxch, v...) \
        [RTW89_RXCH_##rxch] = { \
                .num = R_AX_##rxch##_RXBD_NUM ##v, \
                .idx = R_AX_##rxch##_RXBD_IDX ##v, \
                .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
                .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
        }

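/* For example, DEF_TXCHADDRS(info, ACH0) expands to the RTW89_TXCH_ACH0
 * entry built from R_AX_ACH0_TXBD_NUM, R_AX_ACH0_TXBD_IDX, and so on; the
 * variadic 'v' argument (e.g. _V1 below) is token-pasted onto a subset of
 * the register names to pick the v1 addresses where they differ.
 */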
const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
        .tx = {
                DEF_TXCHADDRS(info, ACH0),
                DEF_TXCHADDRS(info, ACH1),
                DEF_TXCHADDRS(info, ACH2),
                DEF_TXCHADDRS(info, ACH3),
                DEF_TXCHADDRS(info, ACH4),
                DEF_TXCHADDRS(info, ACH5),
                DEF_TXCHADDRS(info, ACH6),
                DEF_TXCHADDRS(info, ACH7),
                DEF_TXCHADDRS(info, CH8),
                DEF_TXCHADDRS(info, CH9),
                DEF_TXCHADDRS_TYPE1(info, CH10),
                DEF_TXCHADDRS_TYPE1(info, CH11),
                DEF_TXCHADDRS(info, CH12),
        },
        .rx = {
                DEF_RXCHADDRS(info, RXQ),
                DEF_RXCHADDRS(info, RPQ),
        },
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
        .tx = {
                DEF_TXCHADDRS(info, ACH0, _V1),
                DEF_TXCHADDRS(info, ACH1, _V1),
                DEF_TXCHADDRS(info, ACH2, _V1),
                DEF_TXCHADDRS(info, ACH3, _V1),
                DEF_TXCHADDRS(info, ACH4, _V1),
                DEF_TXCHADDRS(info, ACH5, _V1),
                DEF_TXCHADDRS(info, ACH6, _V1),
                DEF_TXCHADDRS(info, ACH7, _V1),
                DEF_TXCHADDRS(info, CH8, _V1),
                DEF_TXCHADDRS(info, CH9, _V1),
                DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
                DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
                DEF_TXCHADDRS(info, CH12, _V1),
        },
        .rx = {
                DEF_RXCHADDRS(info, RXQ, _V1),
                DEF_RXCHADDRS(info, RPQ, _V1),
        },
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
                                    enum rtw89_tx_channel txch,
                                    const struct rtw89_pci_ch_dma_addr **addr)
{
        const struct rtw89_pci_info *info = rtwdev->pci_info;

        if (txch >= RTW89_TXCH_NUM)
                return -EINVAL;

        *addr = &info->dma_addr_set->tx[txch];

        return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
                                    enum rtw89_rx_channel rxch,
                                    const struct rtw89_pci_ch_dma_addr **addr)
{
        const struct rtw89_pci_info *info = rtwdev->pci_info;

        if (rxch >= RTW89_RXCH_NUM)
                return -EINVAL;

        *addr = &info->dma_addr_set->rx[rxch];

        return 0;
}

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
        struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

        /* reserve one descriptor so a full ring can be told from an empty one */
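        /* E.g. (hypothetical numbers): len = 128 with wp == rp yields
         * 128 - 0 - 1 = 127 free BDs; the spare slot keeps wp == rp
         * meaning "empty" rather than "full".
         */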
        if (bd_ring->rp > bd_ring->wp)
                return bd_ring->rp - bd_ring->wp - 1;

        return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
        u32 cnt;

        spin_lock_bh(&rtwpci->trx_lock);
        rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
        cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
        spin_unlock_bh(&rtwpci->trx_lock);

        return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
                                                   u8 txch)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
        u32 cnt;

        spin_lock_bh(&rtwpci->trx_lock);
        cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
        spin_unlock_bh(&rtwpci->trx_lock);

        return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
                                                     u8 txch)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
        struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
        u32 bd_cnt, wd_cnt, min_cnt = 0;
        struct rtw89_pci_rx_ring *rx_ring;
        u32 cnt;

        rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

        spin_lock_bh(&rtwpci->trx_lock);
        bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
        wd_cnt = wd_ring->curr_num;

        if (wd_cnt == 0 || bd_cnt == 0) {
                cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
                if (!cnt)
                        goto out_unlock;
                rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

                bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
                if (bd_cnt == 0)
                        rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
        }

        bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
        wd_cnt = wd_ring->curr_num;
        min_cnt = min(bd_cnt, wd_cnt);
        if (min_cnt == 0)
                rtw89_warn(rtwdev, "still no tx resource after reclaim\n");

out_unlock:
        spin_unlock_bh(&rtwpci->trx_lock);

        return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
                                                   u8 txch)
{
        if (rtwdev->hci.paused)
                return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

        if (txch == RTW89_TXCH_CH12)
                return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

        return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
        u32 host_idx, addr;

        spin_lock_bh(&rtwpci->trx_lock);

        addr = bd_ring->addr.idx;
        host_idx = bd_ring->wp;
        rtw89_write16(rtwdev, addr, host_idx);

        spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
                                        int n_txbd)
{
        struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
        u32 host_idx, len;

        len = bd_ring->len;
        host_idx = bd_ring->wp + n_txbd;
        host_idx = host_idx < len ? host_idx : host_idx - len;

        bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

        if (rtwdev->hci.paused) {
                set_bit(txch, rtwpci->kick_map);
                return;
        }

        __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_tx_ring *tx_ring;
        int txch;

        for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
                if (!test_and_clear_bit(txch, rtwpci->kick_map))
                        continue;

                tx_ring = &rtwpci->tx_rings[txch];
                __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
        }
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
        struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
        u32 cur_idx, cur_rp;
        u8 i;

        /* Because the time taken by the I/O is somewhat variable, it's hard
         * to define a reasonable fixed total timeout for the
         * read_poll_timeout* helpers. Instead, bound the number of polling
         * attempts with a plain for loop and udelay.
         */
        for (i = 0; i < 60; i++) {
                cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
                cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
                if (cur_rp == bd_ring->wp)
                        return;

                udelay(1);
        }

        if (!drop)
                rtw89_info(rtwdev, "timed out flushing pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
                                        bool drop)
{
        u8 i;

        for (i = 0; i < RTW89_TXCH_NUM; i++) {
                /* It may be unnecessary to flush FWCMD queue. */
                if (i == RTW89_TXCH_CH12)
                        continue;

                if (txchs & BIT(i))
                        __pci_flush_txch(rtwdev, i, drop);
        }
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
                                       bool drop)
{
        __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
                               void *txaddr_info_addr, u32 total_len,
                               dma_addr_t dma, u8 *add_info_nr)
{
        struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;

        txaddr_info->length = cpu_to_le16(total_len);
        txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
                                          RTW89_PCI_ADDR_NUM(1));
        txaddr_info->dma = cpu_to_le32(dma);

        *add_info_nr = 1;

        return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);

u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
                                  void *txaddr_info_addr, u32 total_len,
                                  dma_addr_t dma, u8 *add_info_nr)
{
        struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
        u32 remain = total_len;
        u32 len;
        u16 length_option;
        int n;

        for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
                len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
                      TXADDR_INFO_LENTHG_V1_MAX : remain;
                remain -= len;

                length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
                                FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
                                FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
                txaddr_info->length_opt = cpu_to_le16(length_option);
                txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
                txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

                dma += len;
                txaddr_info++;
        }

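        /* Example (hypothetical numbers): with a per-entry cap of 2048
         * bytes, a 5000-byte buffer would split into 2048 + 2048 + 904,
         * with only the last entry carrying the LS bit.
         */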
        WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
                  remain, total_len);

        *add_info_nr = n;

        return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);

static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
                                 struct rtw89_pci_tx_ring *tx_ring,
                                 struct rtw89_pci_tx_wd *txwd,
                                 struct rtw89_core_tx_request *tx_req)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        const struct rtw89_chip_info *chip = rtwdev->chip;
        struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
        struct rtw89_txwd_info *txwd_info;
        struct rtw89_pci_tx_wp_info *txwp_info;
        void *txaddr_info_addr;
        struct pci_dev *pdev = rtwpci->pdev;
        struct sk_buff *skb = tx_req->skb;
        struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
        bool en_wd_info = desc_info->en_wd_info;
        u32 txwd_len;
        u32 txwp_len;
        u32 txaddr_info_len;
        dma_addr_t dma;
        int ret;

        dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma)) {
                rtw89_err(rtwdev, "failed to map skb dma data\n");
                ret = -EBUSY;
                goto err;
        }

        tx_data->dma = dma;

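        /* Layout of the TXWD buffer from here on: the WD body (plus the
         * optional WD info), then the TX WP seq words, then the
         * chip-specific address-info entries; txwd->len covers all three.
         */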
        txwp_len = sizeof(*txwp_info);
        txwd_len = chip->txwd_body_size;
        txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;

        txwp_info = txwd->vaddr + txwd_len;
        txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
        txwp_info->seq1 = 0;
        txwp_info->seq2 = 0;
        txwp_info->seq3 = 0;

        tx_ring->tx_cnt++;
        txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
        txaddr_info_len =
                rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
                                            dma, &desc_info->addr_info_nr);

        txwd->len = txwd_len + txwp_len + txaddr_info_len;

        rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

        skb_queue_tail(&txwd->queue, skb);

        return 0;

err:
        return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
                                  struct rtw89_pci_tx_ring *tx_ring,
                                  struct rtw89_pci_tx_bd_32 *txbd,
                                  struct rtw89_core_tx_request *tx_req)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        const struct rtw89_chip_info *chip = rtwdev->chip;
        struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
        void *txdesc;
        int txdesc_size = chip->h2c_desc_size;
        struct pci_dev *pdev = rtwpci->pdev;
        struct sk_buff *skb = tx_req->skb;
        struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
        dma_addr_t dma;

        txdesc = skb_push(skb, txdesc_size);
        memset(txdesc, 0, txdesc_size);
        rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

        dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma)) {
                rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
                return -EBUSY;
        }

        tx_data->dma = dma;
        txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
        txbd->length = cpu_to_le16(skb->len);
        txbd->dma = cpu_to_le32(tx_data->dma);
        skb_queue_tail(&rtwpci->h2c_queue, skb);

        rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

        return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
                                 struct rtw89_pci_tx_ring *tx_ring,
                                 struct rtw89_pci_tx_bd_32 *txbd,
                                 struct rtw89_core_tx_request *tx_req)
{
        struct rtw89_pci_tx_wd *txwd;
        int ret;

        /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
         * buffer with WD BODY only. So here we don't need to check the free
         * pages of the wd ring.
         */
        if (tx_ring->txch == RTW89_TXCH_CH12)
                return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

        txwd = rtw89_pci_dequeue_txwd(tx_ring);
        if (!txwd) {
                rtw89_err(rtwdev, "no available TXWD\n");
                ret = -ENOSPC;
                goto err;
        }

        ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
        if (ret) {
                rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
                goto err_enqueue_wd;
        }

        list_add_tail(&txwd->list, &tx_ring->busy_pages);

        txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
        txbd->length = cpu_to_le16(txwd->len);
        txbd->dma = cpu_to_le32(txwd->paddr);

        rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

        return 0;

err_enqueue_wd:
        rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
        return ret;
}

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
                              u8 txch)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_tx_ring *tx_ring;
        struct rtw89_pci_tx_bd_32 *txbd;
        u32 n_avail_txbd;
        int ret = 0;

        /* FWCMD-type requests and DMA channel 12 must be used together */
        if ((txch == RTW89_TXCH_CH12 ||
             tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
            (txch != RTW89_TXCH_CH12 ||
             tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
                rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
                return -EINVAL;
        }

        tx_ring = &rtwpci->tx_rings[txch];
        spin_lock_bh(&rtwpci->trx_lock);

        n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
        if (n_avail_txbd == 0) {
                rtw89_err(rtwdev, "no available TXBD\n");
                ret = -ENOSPC;
                goto err_unlock;
        }

        txbd = rtw89_pci_get_next_txbd(tx_ring);
        ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
        if (ret) {
                rtw89_err(rtwdev, "failed to submit TXBD\n");
                goto err_unlock;
        }

        spin_unlock_bh(&rtwpci->trx_lock);
        return 0;

err_unlock:
        spin_unlock_bh(&rtwpci->trx_lock);
        return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
        struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
        int ret;

        ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
        if (ret) {
                rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
                return ret;
        }

        return 0;
}

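/* Each entry describes a TX channel's slice of the shared TX BD RAM:
 * start_idx is its first entry, max_num the most entries it may occupy and
 * min_num its guaranteed reservation (our reading of the
 * BDRAM_{SIDX,MAX,MIN}_MASK fields programmed in rtw89_pci_reset_trx_rings()).
 */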
1334static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
1335        [RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
1336        [RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
1337        [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1338        [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1339        [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
1340        [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
1341        [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
1342        [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
1343        [RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
1344        [RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
1345        [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
1346        [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
1347        [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
1348};
1349
1350static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
1351{
1352        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1353        struct rtw89_pci_tx_ring *tx_ring;
1354        struct rtw89_pci_rx_ring *rx_ring;
1355        struct rtw89_pci_dma_ring *bd_ring;
1356        const struct rtw89_pci_bd_ram *bd_ram;
1357        u32 addr_num;
1358        u32 addr_bdram;
1359        u32 addr_desa_l;
1360        u32 val32;
1361        int i;
1362
1363        for (i = 0; i < RTW89_TXCH_NUM; i++) {
1364                tx_ring = &rtwpci->tx_rings[i];
1365                bd_ring = &tx_ring->bd_ring;
1366                bd_ram = &bd_ram_table[i];
1367                addr_num = bd_ring->addr.num;
1368                addr_bdram = bd_ring->addr.bdram;
1369                addr_desa_l = bd_ring->addr.desa_l;
1370                bd_ring->wp = 0;
1371                bd_ring->rp = 0;
1372
1373                val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
1374                        FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
1375                        FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
1376
1377                rtw89_write16(rtwdev, addr_num, bd_ring->len);
1378                rtw89_write32(rtwdev, addr_bdram, val32);
1379                rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1380        }
1381
1382        for (i = 0; i < RTW89_RXCH_NUM; i++) {
1383                rx_ring = &rtwpci->rx_rings[i];
1384                bd_ring = &rx_ring->bd_ring;
1385                addr_num = bd_ring->addr.num;
1386                addr_desa_l = bd_ring->addr.desa_l;
1387                bd_ring->wp = 0;
1388                bd_ring->rp = 0;
1389                rx_ring->diliver_skb = NULL;
1390                rx_ring->diliver_desc.ready = false;
1391
1392                rtw89_write16(rtwdev, addr_num, bd_ring->len);
1393                rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1394        }
1395}
1396
1397static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
1398                                      struct rtw89_pci_tx_ring *tx_ring)
1399{
1400        rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
1401        rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
1402}
1403
1404static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
1405{
1406        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1407        int txch;
1408
1409        rtw89_pci_reset_trx_rings(rtwdev);
1410
1411        spin_lock_bh(&rtwpci->trx_lock);
1412        for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1413                if (txch == RTW89_TXCH_CH12) {
1414                        rtw89_pci_release_fwcmd(rtwdev, rtwpci,
1415                                                skb_queue_len(&rtwpci->h2c_queue), true);
1416                        continue;
1417                }
1418                rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
1419        }
1420        spin_unlock_bh(&rtwpci->trx_lock);
1421}
1422
1423static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
1424{
1425        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1426        unsigned long flags;
1427
1428        spin_lock_irqsave(&rtwpci->irq_lock, flags);
1429        rtwpci->running = true;
1430        rtw89_chip_enable_intr(rtwdev, rtwpci);
1431        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1432}
1433
1434static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
1435{
1436        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1437        unsigned long flags;
1438
1439        spin_lock_irqsave(&rtwpci->irq_lock, flags);
1440        rtwpci->running = false;
1441        rtw89_chip_disable_intr(rtwdev, rtwpci);
1442        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1443}
1444
1445static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1446{
1447        rtw89_core_napi_start(rtwdev);
1448        rtw89_pci_enable_intr_lock(rtwdev);
1449
1450        return 0;
1451}
1452
1453static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1454{
1455        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1456        struct pci_dev *pdev = rtwpci->pdev;
1457
1458        rtw89_pci_disable_intr_lock(rtwdev);
1459        synchronize_irq(pdev->irq);
1460        rtw89_core_napi_stop(rtwdev);
1461}
1462
1463static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
1464{
1465        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1466        struct pci_dev *pdev = rtwpci->pdev;
1467
1468        if (pause) {
1469                rtw89_pci_disable_intr_lock(rtwdev);
1470                synchronize_irq(pdev->irq);
1471                if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
1472                        napi_synchronize(&rtwdev->napi);
1473        } else {
1474                rtw89_pci_enable_intr_lock(rtwdev);
1475                rtw89_pci_tx_kick_off_pending(rtwdev);
1476        }
1477}
1478
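    /* The low-power firmware interface apparently exposes the BD index
     * registers at alternate offsets; swap every ring's ->addr.idx between
     * the normal DMA address set and the low-power table from the chip's
     * pci_info.
     */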
1479static
1480void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
1481{
1482        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1483        const struct rtw89_pci_info *info = rtwdev->pci_info;
1484        const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
1485        const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
1486        struct rtw89_pci_tx_ring *tx_ring;
1487        struct rtw89_pci_rx_ring *rx_ring;
1488        int i;
1489
1490        if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1491                return;
1492
1493        for (i = 0; i < RTW89_TXCH_NUM; i++) {
1494                tx_ring = &rtwpci->tx_rings[i];
1495                tx_ring->bd_ring.addr.idx = low_power ?
1496                                            bd_idx_addr->tx_bd_addrs[i] :
1497                                            dma_addr_set->tx[i].idx;
1498        }
1499
1500        for (i = 0; i < RTW89_RXCH_NUM; i++) {
1501                rx_ring = &rtwpci->rx_rings[i];
1502                rx_ring->bd_ring.addr.idx = low_power ?
1503                                            bd_idx_addr->rx_bd_addrs[i] :
1504                                            dma_addr_set->rx[i].idx;
1505        }
1506}
1507
1508static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
1509{
1510        enum rtw89_pci_intr_mask_cfg cfg;
1511
1512        WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1513
1514        cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
1515        rtw89_chip_config_intr_mask(rtwdev, cfg);
1516        rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
1517}
1518
1519static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
1520
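    /* CMAC registers read back as RTW89_R32_DEAD while the CMAC clocks are
     * gated.  On a dead read, force all CMAC clocks on through R_AX_CK_EN
     * and retry, giving up after MAC_REG_POOL_COUNT attempts.
     */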
1521static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
1522{
1523        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1524        u32 val = readl(rtwpci->mmap + addr);
1525        int count;
1526
1527        for (count = 0; ; count++) {
1528                if (val != RTW89_R32_DEAD)
1529                        return val;
1530                if (count >= MAC_REG_POOL_COUNT) {
1531                        rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
1532                        return RTW89_R32_DEAD;
1533                }
1534                rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
1535                val = readl(rtwpci->mmap + addr);
1536        }
1537
1538        return val;
1539}
1540
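    /* Sub-word reads of CMAC space go through the recovering 32-bit read
     * above: fetch the aligned dword and shift the wanted lanes down, e.g.
     * low address bits 0b11 mean a shift of 24.
     */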
1541static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
1542{
1543        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1544        u32 addr32, val32, shift;
1545
1546        if (!ACCESS_CMAC(addr))
1547                return readb(rtwpci->mmap + addr);
1548
1549        addr32 = addr & ~0x3;
1550        shift = (addr & 0x3) * 8;
1551        val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1552        return val32 >> shift;
1553}
1554
1555static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
1556{
1557        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1558        u32 addr32, val32, shift;
1559
1560        if (!ACCESS_CMAC(addr))
1561                return readw(rtwpci->mmap + addr);
1562
1563        addr32 = addr & ~0x3;
1564        shift = (addr & 0x3) * 8;
1565        val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1566        return val32 >> shift;
1567}
1568
1569static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
1570{
1571        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1572
1573        if (!ACCESS_CMAC(addr))
1574                return readl(rtwpci->mmap + addr);
1575
1576        return rtw89_pci_ops_read32_cmac(rtwdev, addr);
1577}
1578
1579static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
1580{
1581        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1582
1583        writeb(data, rtwpci->mmap + addr);
1584}
1585
1586static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
1587{
1588        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1589
1590        writew(data, rtwpci->mmap + addr);
1591}
1592
1593static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
1594{
1595        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1596
1597        writel(data, rtwpci->mmap + addr);
1598}
1599
1600static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
1601{
1602        enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
1603        const struct rtw89_pci_info *info = rtwdev->pci_info;
1604        u32 txhci_en = info->txhci_en_bit;
1605        u32 rxhci_en = info->rxhci_en_bit;
1606
1607        if (enable) {
1608                if (chip_id != RTL8852C)
1609                        rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
1610                                          B_AX_STOP_PCIEIO);
1611                rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
1612                                  txhci_en | rxhci_en);
1613                if (chip_id == RTL8852C)
1614                        rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
1615                                          B_AX_STOP_AXI_MST);
1616        } else {
1617                if (chip_id != RTL8852C)
1618                        rtw89_write32_set(rtwdev, info->dma_stop1_reg,
1619                                          B_AX_STOP_PCIEIO);
1620                else
1621                        rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
1622                                          B_AX_STOP_AXI_MST);
                    /* undo the TX/RX HCI DMA enables set on the enable path */
                    rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
                                      txhci_en | rxhci_en);
1623                if (chip_id == RTL8852C)
1624                        rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
1625                                          B_AX_STOP_AXI_MST);
1626        }
1627}
1628
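    /* MDIO access to the PCIe PHY: latch the 5-bit register address, pick
     * the PHY page from link speed (registers at 0x20 and above sit on
     * page 1), kick the read/write flag, then poll up to 2 ms for the
     * hardware to clear it.
     */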
1629static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
1630{
1631        u16 val;
1632
1633        rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
1634
1635        val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
1636        switch (speed) {
1637        case PCIE_PHY_GEN1:
1638                if (addr < 0x20)
1639                        val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
1640                else
1641                        val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
1642                break;
1643        case PCIE_PHY_GEN2:
1644                if (addr < 0x20)
1645                        val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
1646                else
1647                        val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
1648                break;
1649        default:
1650                rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
1651                return -EINVAL;
1652        }
1653        rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
1654        rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
1655
1656        return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
1657                                 false, rtwdev, R_AX_MDIO_CFG);
1658}
1659
1660static int
1661rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
1662{
1663        int ret;
1664
1665        ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
1666        if (ret) {
1667                rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
1668                return ret;
1669        }
1670        *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
1671
1672        return 0;
1673}
1674
1675static int
1676rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
1677{
1678        int ret;
1679
1680        rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
1681        ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
1682        if (ret) {
1683                rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
1684                return ret;
1685        }
1686
1687        return 0;
1688}
1689
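    /* Read-modify-write of an MDIO field.  Data is shifted up from the
     * mask's lowest set bit: e.g. mask 0x00F0 and data 0x5 turn a current
     * value of 0xABCD into 0xAB5D.
     */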
1690static int
1691rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
1692{
1693        u32 shift;
1694        int ret;
1695        u16 val;
1696
1697        ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1698        if (ret)
1699                return ret;
1700
1701        shift = __ffs(mask);
1702        val &= ~mask;
1703        val |= ((data << shift) & mask);
1704
1705        ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
1706        if (ret)
1707                return ret;
1708
1709        return 0;
1710}
1711
1712static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1713{
1714        int ret;
1715        u16 val;
1716
1717        ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1718        if (ret)
1719                return ret;
1720        ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
1721        if (ret)
1722                return ret;
1723
1724        return 0;
1725}
1726
1727static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1728{
1729        int ret;
1730        u16 val;
1731
1732        ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1733        if (ret)
1734                return ret;
1735        ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
1736        if (ret)
1737                return ret;
1738
1739        return 0;
1740}
1741
1742static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
1743                                       u8 data)
1744{
1745        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1746        struct pci_dev *pdev = rtwpci->pdev;
1747
1748        return pci_write_config_byte(pdev, addr, data);
1749}
1750
1751static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
1752                                      u8 *value)
1753{
1754        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1755        struct pci_dev *pdev = rtwpci->pdev;
1756
1757        return pci_read_config_byte(pdev, addr, value);
1758}
1759
1760static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
1761                                     u8 bit)
1762{
1763        u8 value;
1764        int ret;
1765
1766        ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
1767        if (ret)
1768                return ret;
1769
1770        value |= bit;
1771        ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
1772
1773        return ret;
1774}
1775
1776static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
1777                                     u8 bit)
1778{
1779        u8 value;
1780        int ret;
1781
1782        ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
1783        if (ret)
1784                return ret;
1785
1786        value &= ~bit;
1787        ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
1788
1789        return ret;
1790}
1791
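    /* Sample the reference-clock calibration counter: pulse
     * B_AX_CLK_CALIB_EN off then on, let the counter run for 300 us and
     * read the 12-bit result.  All-zeros or all-ones presumably means the
     * counter never ran or saturated, so both are rejected.
     */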
1792static int
1793__get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
1794{
1795        u16 val, tar;
1796        int ret;
1797
1798        /* Enable counter */
1799        ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
1800        if (ret)
1801                return ret;
1802        ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
1803                                 phy_rate);
1804        if (ret)
1805                return ret;
1806        ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
1807                                 phy_rate);
1808        if (ret)
1809                return ret;
1810
1811        fsleep(300);
1812
1813        ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
1814        if (ret)
1815                return ret;
1816        ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
1817                                 phy_rate);
1818        if (ret)
1819                return ret;
1820
1821        tar = tar & 0x0FFF;
1822        if (tar == 0 || tar == 0x0FFF) {
1823                rtw89_err(rtwdev, "[ERR]Get target failed.\n");
1824                return -EINVAL;
1825        }
1826
1827        *target = tar;
1828
1829        return 0;
1830}
1831
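    /* 8852B-only reference-clock auto-calibration: measure a target count,
     * derive a divider/margin pair so the margin fits the 4-bit field at
     * bits 15:12 (>= 128 saturates at div 3 / margin 0xF; >= 64, >= 32 and
     * >= 16 use div 3/2/1 with the margin shifted to match), re-measure
     * with the divider applied, and program RAC_SET_PPR_V1 with
     * target | margin << 12.  The L1 bit in RTW89_PCIE_L1_CTRL is cleared
     * around the MDIO traffic and restored at the "end" label.
     */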
1832static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
1833{
1834        enum rtw89_pcie_phy phy_rate;
1835        u16 val16, mgn_set, div_set, tar;
1836        u8 val8, bdr_ori;
1837        bool l1_flag = false;
1838        int ret = 0;
1839
1840        if (rtwdev->chip->chip_id != RTL8852B)
1841                return 0;
1842
1843        ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
1844        if (ret) {
1845                rtw89_err(rtwdev, "[ERR]pci config read %X\n",
1846                          RTW89_PCIE_PHY_RATE);
1847                return ret;
1848        }
1849
1850        if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
1851                phy_rate = PCIE_PHY_GEN1;
1852        } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
1853                phy_rate = PCIE_PHY_GEN2;
1854        } else {
1855                rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
1856                return -EOPNOTSUPP;
1857        }
1858        /* Disable L1BD */
1859        ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
1860        if (ret) {
1861                rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
1862                return ret;
1863        }
1864
1865        if (bdr_ori & RTW89_PCIE_BIT_L1) {
1866                ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
1867                                                  bdr_ori & ~RTW89_PCIE_BIT_L1);
1868                if (ret) {
1869                        rtw89_err(rtwdev, "[ERR]pci config write %X\n",
1870                                  RTW89_PCIE_L1_CTRL);
1871                        return ret;
1872                }
1873                l1_flag = true;
1874        }
1875
1876        ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
1877        if (ret) {
1878                rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
1879                goto end;
1880        }
1881
1882        if (val16 & B_AX_CALIB_EN) {
1883                ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
1884                                         val16 & ~B_AX_CALIB_EN, phy_rate);
1885                if (ret) {
1886                        rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1887                        goto end;
1888                }
1889        }
1890
1891        if (!autook_en)
1892                goto end;
1893        /* Set div */
1894        ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
1895        if (ret) {
1896                rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1897                goto end;
1898        }
1899
1900        /* Obtain div and margin */
1901        ret = __get_target(rtwdev, &tar, phy_rate);
1902        if (ret) {
1903                rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
1904                goto end;
1905        }
1906
1907        mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
1908
1909        if (mgn_set >= 128) {
1910                div_set = 0x0003;
1911                mgn_set = 0x000F;
1912        } else if (mgn_set >= 64) {
1913                div_set = 0x0003;
1914                mgn_set >>= 3;
1915        } else if (mgn_set >= 32) {
1916                div_set = 0x0002;
1917                mgn_set >>= 2;
1918        } else if (mgn_set >= 16) {
1919                div_set = 0x0001;
1920                mgn_set >>= 1;
1921        } else if (mgn_set == 0) {
1922                rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar);
1923                goto end;
1924        } else {
1925                div_set = 0x0000;
1926        }
1927
1928        ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
1929        if (ret) {
1930                rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
1931                goto end;
1932        }
1933
1934        val16 |= u16_encode_bits(div_set, B_AX_DIV);
1935
1936        ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
1937        if (ret) {
1938                rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1939                goto end;
1940        }
1941
1942        ret = __get_target(rtwdev, &tar, phy_rate);
1943        if (ret) {
1944                rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
1945                goto end;
1946        }
1947
1948        rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
1949                    tar, div_set, mgn_set);
1950        ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
1951                                 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
1952        if (ret) {
1953                rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
1954                goto end;
1955        }
1956
1957        /* Enable function */
1958        ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
1959        if (ret) {
1960                rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1961                goto end;
1962        }
1963
1964        /* CLK delay = 0 */
1965        ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
1966                                          PCIE_CLKDLY_HW_0);
1967
1968end:
1969        /* Set L1BD to ori */
1970        if (l1_flag) {
1971                ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
1972                                                  bdr_ori);
1973                if (ret) {
1974                        rtw89_err(rtwdev, "[ERR]pci config write %X\n",
1975                                  RTW89_PCIE_L1_CTRL);
1976                        return ret;
1977                }
1978        }
1979
1980        return ret;
1981}
1982
1983static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
1984{
1985        enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
1986        int ret;
1987
1988        if (chip_id == RTL8852A) {
1989                ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
1990                                             PCIE_PHY_GEN1);
1991                if (ret)
1992                        return ret;
1993                ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
1994                                             PCIE_PHY_GEN2);
1995                if (ret)
1996                        return ret;
1997        } else if (chip_id == RTL8852C) {
1998                rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
1999                                  B_AX_DEGLITCH);
2000                rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
2001                                  B_AX_DEGLITCH);
2002        }
2003
2004        return 0;
2005}
2006
2007static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
2008{
2009        if (rtwdev->chip->chip_id != RTL8852A)
2010                return;
2011
2012        rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
2013}
2014
2015static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
2016{
2017        if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B)
2018                return;
2019
2020        rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
2021}
2022
2023static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
2024{
2025        int ret;
2026
2027        if (rtwdev->chip->chip_id != RTL8852A)
2028                return 0;
2029
2030        ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2031                                     PCIE_PHY_GEN1);
2032        if (ret)
2033                return ret;
2034
2035        ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2036                                     PCIE_PHY_GEN2);
2037        if (ret)
2038                return ret;
2039
2040        return 0;
2041}
2042
2043static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
2044{
2045        if (rtwdev->chip->chip_id != RTL8852A)
2046                return;
2047
2048        rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
2049}
2050
2051static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
2052{
2053        if (rtwdev->chip->chip_id == RTL8852A ||
2054            rtwdev->chip->chip_id == RTL8852B) {
2055                rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2056                                  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2057                rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2058                                  B_AX_PCIE_DIS_WLSUS_AFT_PDN);
2059        } else if (rtwdev->chip->chip_id == RTL8852C) {
2060                rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2061                                  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2062        }
2063}
2064
2065static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
2066{
2067        if (rtwdev->chip->chip_id != RTL8852B)
2068                return 0;
2069
2070        return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
2071                                       PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
2072}
2073
2074static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
2075{
2076        if (pwr_up)
2077                rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2078        else
2079                rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2080}
2081
2082static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2083{
2084        if (rtwdev->chip->chip_id != RTL8852C)
2085                return;
2086
2087        rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2088        rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2089}
2090
2091static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2092{
2093        if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2094                return;
2095
2096        rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2097}
2098
2099static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2100{
2101        if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2102                return;
2103
2104        rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2105                          B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2106        rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2107        rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2108                          B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2109}
2110
2111static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2112{
2113        if (rtwdev->chip->chip_id != RTL8852C)
2114                return;
2115
2116        rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2117}
2118
2119static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2120{
2121        if (rtwdev->chip->chip_id != RTL8852C)
2122                return;
2123
2124        rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2125}
2126
2127static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2128{
2129        if (rtwdev->chip->chip_id == RTL8852C)
2130                return;
2131
2132        rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2133                          B_AX_SIC_EN_FORCE_CLKREQ);
2134}
2135
2136static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2137{
2138        const struct rtw89_pci_info *info = rtwdev->pci_info;
2139        u32 lbc;
2140
2141        if (rtwdev->chip->chip_id == RTL8852C)
2142                return;
2143
2144        lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2145        if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2146                lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
2147                lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2148        } else {
2149                lbc &= ~B_AX_LBC_EN;
2150        }
2151        /* commit: must be a full write so B_AX_LBC_EN can be cleared */
2152        rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2153}
2154
2155static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2156{
2157        const struct rtw89_pci_info *info = rtwdev->pci_info;
2158        u32 val32;
2159
2160        if (rtwdev->chip->chip_id != RTL8852C)
2161                return;
2162
2163        if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2164                val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2165                                   info->io_rcy_tmr);
2166                rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
2167                rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
2168                rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2169
2170                rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2171                rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2172                rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2173        } else {
2174                rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2175                rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2176                rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2177        }
2178
2179        rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2180}
2181
2182static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2183{
2184        if (rtwdev->chip->chip_id == RTL8852C)
2185                return;
2186
2187        rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
2188                          B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
2189
2190        if (rtwdev->chip->chip_id == RTL8852A)
2191                rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
2192                                  B_AX_EN_CHKDSC_NO_RX_STUCK);
2193}
2194
2195static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
2196{
2197        if (rtwdev->chip->chip_id == RTL8852C)
2198                return;
2199
2200        rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2201                          B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
2202}
2203
2204static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
2205{
2206        const struct rtw89_pci_info *info = rtwdev->pci_info;
2207        enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2208        u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
2209                  B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
2210                  B_AX_CLR_CH12_IDX;
2211        u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
2212        u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
2213
2214        if (chip_id == RTL8852A || chip_id == RTL8852C)
2215                val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
2216                       B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
2217        /* clear DMA indexes */
2218        rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
2219        if (chip_id == RTL8852A || chip_id == RTL8852C)
2220                rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
2221                                  B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
2222        rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
2223                          B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2224}
2225
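    /* Wait for the TX DMA engines to drain: read_poll_timeout() re-reads
     * the busy register every 10 us and fails with -ETIMEDOUT after
     * 100 us.  Channels 10/11 report through a second busy register and
     * are polled separately.
     */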
2226static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
2227{
2228        const struct rtw89_pci_info *info = rtwdev->pci_info;
2229        u32 ret, check, dma_busy;
2230        u32 dma_busy1 = info->dma_busy1_reg;
2231        u32 dma_busy2 = info->dma_busy2_reg;
2232
2233        check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY |
2234                B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY |
2235                B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY |
2236                B_AX_CH9_BUSY | B_AX_CH12_BUSY;
2237
2238        ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2239                                10, 100, false, rtwdev, dma_busy1);
2240        if (ret)
2241                return ret;
2242
2243        check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
2244
2245        ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2246                                10, 100, false, rtwdev, dma_busy2);
2247        if (ret)
2248                return ret;
2249
2250        return 0;
2251}
2252
2253static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
2254{
2255        const struct rtw89_pci_info *info = rtwdev->pci_info;
2256        u32 ret, check, dma_busy;
2257        u32 dma_busy3 = info->dma_busy3_reg;
2258
2259        check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
2260
2261        ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2262                                10, 100, false, rtwdev, dma_busy3);
2263        if (ret)
2264                return ret;
2265
2266        return 0;
2267}
2268
2269static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
2270{
2271        u32 ret;
2272
2273        ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev);
2274        if (ret) {
2275                rtw89_err(rtwdev, "txdma ch busy\n");
2276                return ret;
2277        }
2278
2279        ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev);
2280        if (ret) {
2281                rtw89_err(rtwdev, "rxdma ch busy\n");
2282                return ret;
2283        }
2284
2285        return 0;
2286}
2287
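    /* Apply the per-chip DMA operating mode from pci_info: BD truncation
     * for TX/RX, packet vs. separated RXBD layout, burst sizes, tag mode,
     * multi-tag count and the idle/active WD DMA intervals.  8852C uses
     * the HAXI variants of the same fields.
     */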
2288static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
2289{
2290        const struct rtw89_pci_info *info = rtwdev->pci_info;
2291        enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
2292        enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
2293        enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
2294        enum mac_ax_tag_mode tag_mode = info->tag_mode;
2295        enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
2296        enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
2297        enum mac_ax_tx_burst tx_burst = info->tx_burst;
2298        enum mac_ax_rx_burst rx_burst = info->rx_burst;
2299        enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2300        u8 cv = rtwdev->hal.cv;
2301        u32 val32;
2302
2303        if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2304                if (chip_id == RTL8852A && cv == CHIP_CBV)
2305                        rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2306        } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2307                if (chip_id == RTL8852A || chip_id == RTL8852B)
2308                        rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2309        }
2310
2311        if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
2312                if (chip_id == RTL8852A && cv == CHIP_CBV)
2313                        rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2314        } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
2315                if (chip_id == RTL8852A || chip_id == RTL8852B)
2316                        rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2317        }
2318
2319        if (rxbd_mode == MAC_AX_RXBD_PKT) {
2320                rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2321        } else if (rxbd_mode == MAC_AX_RXBD_SEP) {
2322                rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2323
2324                if (chip_id == RTL8852A || chip_id == RTL8852B)
2325                        rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
2326                                           B_AX_PCIE_RX_APPLEN_MASK, 0);
2327        }
2328
2329        if (chip_id == RTL8852A || chip_id == RTL8852B) {
2330                rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
2331                rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
2332        } else if (chip_id == RTL8852C) {
2333                rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
2334                rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
2335        }
2336
2337        if (chip_id == RTL8852A || chip_id == RTL8852B) {
2338                if (tag_mode == MAC_AX_TAG_SGL) {
2339                        val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
2340                                            ~B_AX_LATENCY_CONTROL;
2341                        rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2342                } else if (tag_mode == MAC_AX_TAG_MULTI) {
2343                        val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
2344                                            B_AX_LATENCY_CONTROL;
2345                        rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2346                }
2347        }
2348
2349        rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
2350                           info->multi_tag_num);
2351
2352        if (chip_id == RTL8852A || chip_id == RTL8852B) {
2353                rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
2354                                   wd_dma_idle_intvl);
2355                rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
2356                                   wd_dma_act_intvl);
2357        } else if (chip_id == RTL8852C) {
2358                rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
2359                                   wd_dma_idle_intvl);
2360                rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
2361                                   wd_dma_act_intvl);
2362        }
2363
2364        if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2365                rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2366                                  B_AX_HOST_ADDR_INFO_8B_SEL);
2367                rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2368        } else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2369                rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2370                                  B_AX_HOST_ADDR_INFO_8B_SEL);
2371                rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2372        }
2373
2374        return 0;
2375}
2376
2377static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
2378{
2379        const struct rtw89_pci_info *info = rtwdev->pci_info;
2380
2381        if (rtwdev->chip->chip_id == RTL8852A) {
2382                /* ltr sw trigger */
2383                rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
2384        }
2385        info->ltr_set(rtwdev, false);
2386        rtw89_pci_ctrl_dma_all(rtwdev, false);
2387        rtw89_pci_clr_idx_all(rtwdev);
2388
2389        return 0;
2390}
2391
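    /* HCI side of MAC power-on: apply the per-chip PHY/power quirks, stop
     * all DMA and wait for the engines to idle, clear and re-program the
     * rings, then re-enable DMA with only the firmware-command channel
     * (CH12) unstopped so firmware download can proceed.
     */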
2392static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
2393{
2394        const struct rtw89_pci_info *info = rtwdev->pci_info;
2395        int ret;
2396
2397        rtw89_pci_rxdma_prefth(rtwdev);
2398        rtw89_pci_l1off_pwroff(rtwdev);
2399        rtw89_pci_deglitch_setting(rtwdev);
2400        ret = rtw89_pci_l2_rxen_lat(rtwdev);
2401        if (ret) {
2402                rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
2403                return ret;
2404        }
2405
2406        rtw89_pci_aphy_pwrcut(rtwdev);
2407        rtw89_pci_hci_ldo(rtwdev);
2408        rtw89_pci_dphy_delay(rtwdev);
2409
2410        ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
2411        if (ret) {
2412                rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
2413                return ret;
2414        }
2415
2416        rtw89_pci_power_wake(rtwdev, true);
2417        rtw89_pci_autoload_hang(rtwdev);
2418        rtw89_pci_l12_vmain(rtwdev);
2419        rtw89_pci_gen2_force_ib(rtwdev);
2420        rtw89_pci_l1_ent_lat(rtwdev);
2421        rtw89_pci_wd_exit_l1(rtwdev);
2422        rtw89_pci_set_sic(rtwdev);
2423        rtw89_pci_set_lbc(rtwdev);
2424        rtw89_pci_set_io_rcy(rtwdev);
2425        rtw89_pci_set_dbg(rtwdev);
2426        rtw89_pci_set_keep_reg(rtwdev);
2427
2428        rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA);
2429
2430        /* stop DMA activities */
2431        rtw89_pci_ctrl_dma_all(rtwdev, false);
2432
2433        ret = rtw89_pci_poll_dma_all_idle(rtwdev);
2434        if (ret) {
2435                rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
2436                return ret;
2437        }
2438
2439        rtw89_pci_clr_idx_all(rtwdev);
2440        rtw89_pci_mode_op(rtwdev);
2441
2442        /* fill TRX BD indexes */
2443        rtw89_pci_ops_reset(rtwdev);
2444
2445        ret = rtw89_pci_rst_bdram_pcie(rtwdev);
2446        if (ret) {
2447                rtw89_warn(rtwdev, "reset bdram busy\n");
2448                return ret;
2449        }
2450
2451        /* enable FW CMD queue to download firmware */
2452        rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
2453        rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12);
2454        rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
2455
2456        /* start DMA activities */
2457        rtw89_pci_ctrl_dma_all(rtwdev, true);
2458
2459        return 0;
2460}
2461
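    /* LTR setup for the original chips (enable-only; disable is a no-op
     * here): give up if any LTR register reads back as an error value,
     * then turn LTR on with hardware decision off (8852A kicks idle/active
     * transitions by software via B_AX_APP_LTR_*), a 500 us space index,
     * an 800 us idle timer and fixed RX thresholds and latency words.
     */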
2462int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
2463{
2464        u32 val;
2465
2466        if (!en)
2467                return 0;
2468
2469        val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2470        if (rtw89_pci_ltr_is_err_reg_val(val))
2471                return -EINVAL;
2472        val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2473        if (rtw89_pci_ltr_is_err_reg_val(val))
2474                return -EINVAL;
2475        val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
2476        if (rtw89_pci_ltr_is_err_reg_val(val))
2477                return -EINVAL;
2478        val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
2479        if (rtw89_pci_ltr_is_err_reg_val(val))
2480                return -EINVAL;
2481
2482        rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN);
2483        rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN);
2484        rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
2485                           PCI_LTR_SPC_500US);
2486        rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2487                           PCI_LTR_IDLE_TIMER_800US);
2488        rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2489        rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2490        rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0);
2491        rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
2492
2493        return 0;
2494}
2495EXPORT_SYMBOL(rtw89_pci_ltr_set);
2496
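    /* v1 LTR (newer chips): decisions move into R_AX_LTR_DEC_CTRL, and
     * disable is a real path here, forcing the idle latency index through
     * a driver-requested LTR message instead of returning early as
     * rtw89_pci_ltr_set() does.
     */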
2497int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
2498{
2499        u32 dec_ctrl;
2500        u32 val32;
2501
2502        val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2503        if (rtw89_pci_ltr_is_err_reg_val(val32))
2504                return -EINVAL;
2505        val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2506        if (rtw89_pci_ltr_is_err_reg_val(val32))
2507                return -EINVAL;
2508        dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
2509        if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
2510                return -EINVAL;
2511        val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
2512        if (rtw89_pci_ltr_is_err_reg_val(val32))
2513                return -EINVAL;
2514        val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
2515        if (rtw89_pci_ltr_is_err_reg_val(val32))
2516                return -EINVAL;
2517
2518        if (!en) {
2519                dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
2520                dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
2521                            B_AX_LTR_REQ_DRV;
2522        } else {
2523                dec_ctrl |= B_AX_LTR_HW_DEC_EN;
2524        }
2525
2526        dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
2527        dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
2528
2529        if (en)
2530                rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
2531                                  B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
2532        rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2533                           PCI_LTR_IDLE_TIMER_3_2MS);
2534        rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2535        rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2536        rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
2537        rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
2538        rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
2539
2540        return 0;
2541}
2542EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
2543
2544static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
2545{
2546        const struct rtw89_pci_info *info = rtwdev->pci_info;
2547        enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2548        int ret;
2549
2550        ret = info->ltr_set(rtwdev, true);
2551        if (ret) {
2552                rtw89_err(rtwdev, "pci ltr set fail\n");
2553                return ret;
2554        }
2555        if (chip_id == RTL8852A) {
2556                /* ltr sw trigger */
2557                rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
2558        }
2559        if (chip_id == RTL8852A || chip_id == RTL8852B) {
2560                /* ADDR info 8-byte mode */
2561                rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2562                                  B_AX_HOST_ADDR_INFO_8B_SEL);
2563                rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2564        }
2565
2566        /* enable DMA for all queues */
2567        rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
2568        rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
2569
2570        /* Release PCI IO */
2571        rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
2572                          B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
2573
2574        return 0;
2575}
2576
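    /* Bring the PCI function up: enable it, claim bus mastering so DMA can
     * run, and stash the ieee80211_hw in drvdata for the other PCI
     * callbacks.
     */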
2577static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
2578                                  struct pci_dev *pdev)
2579{
2580        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2581        int ret;
2582
2583        ret = pci_enable_device(pdev);
2584        if (ret) {
2585                rtw89_err(rtwdev, "failed to enable pci device\n");
2586                return ret;
2587        }
2588
2589        pci_set_master(pdev);
2590        pci_set_drvdata(pdev, rtwdev->hw);
2591
2592        rtwpci->pdev = pdev;
2593
2594        return 0;
2595}
2596
2597static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
2598                                     struct pci_dev *pdev)
2599{
2600        pci_clear_master(pdev);
2601        pci_disable_device(pdev);
2602}
2603
2604static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
2605                                   struct pci_dev *pdev)
2606{
2607        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2608        unsigned long resource_len;
2609        u8 bar_id = 2;
2610        int ret;
2611
2612        ret = pci_request_regions(pdev, KBUILD_MODNAME);
2613        if (ret) {
2614                rtw89_err(rtwdev, "failed to request pci regions\n");
2615                goto err;
2616        }
2617
2618        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2619        if (ret) {
2620                rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
2621                goto err_release_regions;
2622        }
2623
2624        ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
2625        if (ret) {
2626                rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
2627                goto err_release_regions;
2628        }
2629
2630        resource_len = pci_resource_len(pdev, bar_id);
2631        rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
2632        if (!rtwpci->mmap) {
2633                rtw89_err(rtwdev, "failed to map pci io\n");
2634                ret = -EIO;
2635                goto err_release_regions;
2636        }
2637
2638        return 0;
2639
2640err_release_regions:
2641        pci_release_regions(pdev);
2642err:
2643        return ret;
2644}
2645
2646static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
2647                                    struct pci_dev *pdev)
2648{
2649        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2650
2651        if (rtwpci->mmap) {
2652                pci_iounmap(pdev, rtwpci->mmap);
2653                pci_release_regions(pdev);
2654        }
2655}
2656
2657static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
2658                                      struct pci_dev *pdev,
2659                                      struct rtw89_pci_tx_ring *tx_ring)
2660{
2661        struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
2662        u8 *head = wd_ring->head;
2663        dma_addr_t dma = wd_ring->dma;
2664        u32 page_size = wd_ring->page_size;
2665        u32 page_num = wd_ring->page_num;
2666        u32 ring_sz = page_size * page_num;
2667
2668        dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2669        wd_ring->head = NULL;
2670}
2671
2672static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
2673                                   struct pci_dev *pdev,
2674                                   struct rtw89_pci_tx_ring *tx_ring)
2675{
2676        int ring_sz;
2677        u8 *head;
2678        dma_addr_t dma;
2679
2680        head = tx_ring->bd_ring.head;
2681        dma = tx_ring->bd_ring.dma;
2682        ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
2683        dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2684
2685        tx_ring->bd_ring.head = NULL;
2686}
2687
2688static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
2689                                    struct pci_dev *pdev)
2690{
2691        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2692        struct rtw89_pci_tx_ring *tx_ring;
2693        int i;
2694
2695        for (i = 0; i < RTW89_TXCH_NUM; i++) {
2696                tx_ring = &rtwpci->tx_rings[i];
2697                rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
2698                rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
2699        }
2700}
2701
2702static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
2703                                   struct pci_dev *pdev,
2704                                   struct rtw89_pci_rx_ring *rx_ring)
2705{
2706        struct rtw89_pci_rx_info *rx_info;
2707        struct sk_buff *skb;
2708        dma_addr_t dma;
2709        u32 buf_sz;
2710        u8 *head;
2711        int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
2712        int i;
2713
2714        buf_sz = rx_ring->buf_sz;
2715        for (i = 0; i < rx_ring->bd_ring.len; i++) {
2716                skb = rx_ring->buf[i];
2717                if (!skb)
2718                        continue;
2719
2720                rx_info = RTW89_PCI_RX_SKB_CB(skb);
2721                dma = rx_info->dma;
2722                dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
2723                dev_kfree_skb(skb);
2724                rx_ring->buf[i] = NULL;
2725        }
2726
2727        head = rx_ring->bd_ring.head;
2728        dma = rx_ring->bd_ring.dma;
2729        dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2730
2731        rx_ring->bd_ring.head = NULL;
2732}
2733
2734static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
2735                                    struct pci_dev *pdev)
2736{
2737        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2738        struct rtw89_pci_rx_ring *rx_ring;
2739        int i;
2740
2741        for (i = 0; i < RTW89_RXCH_NUM; i++) {
2742                rx_ring = &rtwpci->rx_rings[i];
2743                rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
2744        }
2745}
2746
2747static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
2748                                     struct pci_dev *pdev)
2749{
2750        rtw89_pci_free_rx_rings(rtwdev, pdev);
2751        rtw89_pci_free_tx_rings(rtwdev, pdev);
2752}
2753
2754static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
2755                                struct rtw89_pci_rx_ring *rx_ring,
2756                                struct sk_buff *skb, int buf_sz, u32 idx)
2757{
2758        struct rtw89_pci_rx_info *rx_info;
2759        struct rtw89_pci_rx_bd_32 *rx_bd;
2760        dma_addr_t dma;
2761
2762        if (!skb)
2763                return -EINVAL;
2764
2765        dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
2766        if (dma_mapping_error(&pdev->dev, dma))
2767                return -EBUSY;
2768
2769        rx_info = RTW89_PCI_RX_SKB_CB(skb);
2770        rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
2771
2772        memset(rx_bd, 0, sizeof(*rx_bd));
2773        rx_bd->buf_size = cpu_to_le16(buf_sz);
2774        rx_bd->dma = cpu_to_le32(dma);
2775        rx_info->dma = dma;
2776
2777        return 0;
2778}
2779
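    /* Carve one coherent allocation into RTW89_PCI_TXWD_NUM_MAX pages of
     * RTW89_PCI_TXWD_PAGE_SIZE bytes and thread them onto the ring's free
     * list; TX paths borrow and return pages instead of allocating per
     * packet.
     */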
2780static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
2781                                      struct pci_dev *pdev,
2782                                      struct rtw89_pci_tx_ring *tx_ring,
2783                                      enum rtw89_tx_channel txch)
2784{
2785        struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
2786        struct rtw89_pci_tx_wd *txwd;
2787        dma_addr_t dma;
2788        dma_addr_t cur_paddr;
2789        u8 *head;
2790        u8 *cur_vaddr;
2791        u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
2792        u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
2793        u32 ring_sz = page_size * page_num;
2794        u32 page_offset;
2795        int i;
2796
2797        /* FWCMD queue doesn't use txwd as pages */
2798        if (txch == RTW89_TXCH_CH12)
2799                return 0;
2800
2801        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
2802        if (!head)
2803                return -ENOMEM;
2804
2805        INIT_LIST_HEAD(&wd_ring->free_pages);
2806        wd_ring->head = head;
2807        wd_ring->dma = dma;
2808        wd_ring->page_size = page_size;
2809        wd_ring->page_num = page_num;
2810
2811        page_offset = 0;
2812        for (i = 0; i < page_num; i++) {
2813                txwd = &wd_ring->pages[i];
2814                cur_paddr = dma + page_offset;
2815                cur_vaddr = head + page_offset;
2816
2817                skb_queue_head_init(&txwd->queue);
2818                INIT_LIST_HEAD(&txwd->list);
2819                txwd->paddr = cur_paddr;
2820                txwd->vaddr = cur_vaddr;
2821                txwd->len = page_size;
2822                txwd->seq = i;
2823                rtw89_pci_enqueue_txwd(tx_ring, txwd);
2824
2825                page_offset += page_size;
2826        }
2827
2828        return 0;
2829}
2830
2831static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
2832                                   struct pci_dev *pdev,
2833                                   struct rtw89_pci_tx_ring *tx_ring,
2834                                   u32 desc_size, u32 len,
2835                                   enum rtw89_tx_channel txch)
2836{
2837        const struct rtw89_pci_ch_dma_addr *txch_addr;
2838        int ring_sz = desc_size * len;
2839        u8 *head;
2840        dma_addr_t dma;
2841        int ret;
2842
2843        ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
2844        if (ret) {
2845                rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
2846                goto err;
2847        }
2848
2849        ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
2850        if (ret) {
2851                rtw89_err(rtwdev, "failed to get address of txch %d", txch);
2852                goto err_free_wd_ring;
2853        }
2854
2855        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
2856        if (!head) {
2857                ret = -ENOMEM;
2858                goto err_free_wd_ring;
2859        }
2860
2861        INIT_LIST_HEAD(&tx_ring->busy_pages);
2862        tx_ring->bd_ring.head = head;
2863        tx_ring->bd_ring.dma = dma;
2864        tx_ring->bd_ring.len = len;
2865        tx_ring->bd_ring.desc_size = desc_size;
2866        tx_ring->bd_ring.addr = *txch_addr;
2867        tx_ring->bd_ring.wp = 0;
2868        tx_ring->bd_ring.rp = 0;
2869        tx_ring->txch = txch;
2870
2871        return 0;
2872
2873err_free_wd_ring:
2874        rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
2875err:
2876        return ret;
2877}
2878
2879static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
2880                                    struct pci_dev *pdev)
2881{
2882        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2883        struct rtw89_pci_tx_ring *tx_ring;
2884        u32 desc_size;
2885        u32 len;
2886        u32 i, tx_allocated;
2887        int ret;
2888
2889        for (i = 0; i < RTW89_TXCH_NUM; i++) {
2890                tx_ring = &rtwpci->tx_rings[i];
2891                desc_size = sizeof(struct rtw89_pci_tx_bd_32);
2892                len = RTW89_PCI_TXBD_NUM_MAX;
2893                ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
2894                                              desc_size, len, i);
2895                if (ret) {
2896                        rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
2897                        goto err_free;
2898                }
2899        }
2900
2901        return 0;
2902
2903err_free:
2904        tx_allocated = i;
2905        for (i = 0; i < tx_allocated; i++) {
2906                tx_ring = &rtwpci->tx_rings[i];
2907                rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
2908        }
2909
2910        return ret;
2911}
2912
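    /* RX rings are fully pre-filled: one skb is allocated and DMA-mapped
     * for every descriptor up front, so the RX path can recycle buffers
     * instead of allocating under load.  The unwind path recovers the dma
     * handle from skb->cb.
     */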
static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
                                   struct pci_dev *pdev,
                                   struct rtw89_pci_rx_ring *rx_ring,
                                   u32 desc_size, u32 len, u32 rxch)
{
        const struct rtw89_pci_ch_dma_addr *rxch_addr;
        struct sk_buff *skb;
        u8 *head;
        dma_addr_t dma;
        int ring_sz = desc_size * len;
        int buf_sz = RTW89_PCI_RX_BUF_SIZE;
        int i, allocated;
        int ret;

        ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
        if (ret) {
                rtw89_err(rtwdev, "failed to get address of rxch %d\n", rxch);
                return ret;
        }

        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
        if (!head) {
                ret = -ENOMEM;
                goto err;
        }

        rx_ring->bd_ring.head = head;
        rx_ring->bd_ring.dma = dma;
        rx_ring->bd_ring.len = len;
        rx_ring->bd_ring.desc_size = desc_size;
        rx_ring->bd_ring.addr = *rxch_addr;
        rx_ring->bd_ring.wp = 0;
        rx_ring->bd_ring.rp = 0;
        rx_ring->buf_sz = buf_sz;
        rx_ring->diliver_skb = NULL;
        rx_ring->diliver_desc.ready = false;

        for (i = 0; i < len; i++) {
                skb = dev_alloc_skb(buf_sz);
                if (!skb) {
                        ret = -ENOMEM;
                        goto err_free;
                }

                memset(skb->data, 0, buf_sz);
                rx_ring->buf[i] = skb;
                ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
                                           buf_sz, i);
                if (ret) {
                        rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
                        dev_kfree_skb_any(skb);
                        rx_ring->buf[i] = NULL;
                        goto err_free;
                }
        }

        return 0;

err_free:
        allocated = i;
        for (i = 0; i < allocated; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;
                dma = *((dma_addr_t *)skb->cb);
                dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
                rx_ring->buf[i] = NULL;
        }

        head = rx_ring->bd_ring.head;
        dma = rx_ring->bd_ring.dma;
        dma_free_coherent(&pdev->dev, ring_sz, head, dma);

        rx_ring->bd_ring.head = NULL;
err:
        return ret;
}

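/* Allocate one RX BD ring per RX DMA channel, rolling back on failure. */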
static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
                                    struct pci_dev *pdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct rtw89_pci_rx_ring *rx_ring;
        u32 desc_size;
        u32 len;
        int i, rx_allocated;
        int ret;

        for (i = 0; i < RTW89_RXCH_NUM; i++) {
                rx_ring = &rtwpci->rx_rings[i];
                desc_size = sizeof(struct rtw89_pci_rx_bd_32);
                len = RTW89_PCI_RXBD_NUM_MAX;
                ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
                                              desc_size, len, i);
                if (ret) {
                        rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
                        goto err_free;
                }
        }

        return 0;

err_free:
        rx_allocated = i;
        for (i = 0; i < rx_allocated; i++) {
                rx_ring = &rtwpci->rx_rings[i];
                rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
        }

        return ret;
}

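/* Allocate all TX and RX DMA rings; TX rings are freed again if the RX
 * ring allocation fails.
 */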
static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
                                     struct pci_dev *pdev)
{
        int ret;

        ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
                goto err;
        }

        ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
                goto err_free_tx_rings;
        }

        return 0;

err_free_tx_rings:
        rtw89_pci_free_tx_rings(rtwdev, pdev);
err:
        return ret;
}

static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
                               struct rtw89_pci *rtwpci)
{
        skb_queue_head_init(&rtwpci->h2c_queue);
        skb_queue_head_init(&rtwpci->h2c_release_queue);
}

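/* Set up everything the PCIe path needs on the host side: the PCI
 * mapping, the TRX DMA rings, the H2C queues and the locks protecting
 * them.
 */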
static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
                                    struct pci_dev *pdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        int ret;

        ret = rtw89_pci_setup_mapping(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to setup pci mapping\n");
                goto err;
        }

        ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
                goto err_pci_unmap;
        }

        rtw89_pci_h2c_init(rtwdev, rtwpci);

        spin_lock_init(&rtwpci->irq_lock);
        spin_lock_init(&rtwpci->trx_lock);

        return 0;

err_pci_unmap:
        rtw89_pci_clear_mapping(rtwdev, pdev);
err:
        return ret;
}

static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
                                     struct pci_dev *pdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

        rtw89_pci_free_trx_rings(rtwdev, pdev);
        rtw89_pci_clear_mapping(rtwdev, pdev);
        rtw89_pci_release_fwcmd(rtwdev, rtwpci,
                                skb_queue_len(&rtwpci->h2c_queue), true);
}

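/* Configure the interrupt masks used by the ISR. While under SER
 * recovery, all normal TRX interrupts are masked and only the halt C2H
 * interrupt is kept enabled.
 */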
void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

        rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;

        if (rtwpci->under_recovery) {
                rtwpci->intrs[0] = 0;
                rtwpci->intrs[1] = 0;
        } else {
                rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
                                   B_AX_RXDMA_INT_EN |
                                   B_AX_RXP1DMA_INT_EN |
                                   B_AX_RPQDMA_INT_EN |
                                   B_AX_RXDMA_STUCK_INT_EN |
                                   B_AX_RDU_INT_EN |
                                   B_AX_RPQBD_FULL_INT_EN |
                                   B_AX_HS0ISR_IND_INT_EN;

                rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
        }
}
EXPORT_SYMBOL(rtw89_pci_config_intr_mask);

static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

        rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
        rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
        rtwpci->intrs[0] = 0;
        rtwpci->intrs[1] = 0;
}

static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

        rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
                            B_AX_HS1ISR_IND_INT_EN |
                            B_AX_HS0ISR_IND_INT_EN;
        rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
        rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
                           B_AX_RXDMA_INT_EN |
                           B_AX_RXP1DMA_INT_EN |
                           B_AX_RPQDMA_INT_EN |
                           B_AX_RXDMA_STUCK_INT_EN |
                           B_AX_RDU_INT_EN |
                           B_AX_RPQBD_FULL_INT_EN;
        rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
}

static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

        rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
                            B_AX_HS0ISR_IND_INT_EN;
        rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
        rtwpci->intrs[0] = 0;
        rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
}

void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

        if (rtwpci->under_recovery)
                rtw89_pci_recovery_intr_mask_v1(rtwdev);
        else if (rtwpci->low_power)
                rtw89_pci_low_power_intr_mask_v1(rtwdev);
        else
                rtw89_pci_default_intr_mask_v1(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);

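/* Allocate a single MSI (or legacy) interrupt vector, install the
 * threaded IRQ handler, and program the reset-time interrupt mask.
 */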
static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
                                 struct pci_dev *pdev)
{
        unsigned long flags = 0;
        int ret;

        flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI;
        ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
        if (ret < 0) {
                rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
                goto err;
        }

        ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
                                        rtw89_pci_interrupt_handler,
                                        rtw89_pci_interrupt_threadfn,
                                        IRQF_SHARED, KBUILD_MODNAME, rtwdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to request threaded irq\n");
                goto err_free_vector;
        }

        rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);

        return 0;

err_free_vector:
        pci_free_irq_vectors(pdev);
err:
        return ret;
}

static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
                               struct pci_dev *pdev)
{
        devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
        pci_free_irq_vectors(pdev);
}

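/* Program the CLKREQ delay and toggle the HW CLKREQ function, unless
 * the disable_clkreq module parameter is set.
 */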
static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
{
        int ret;

        if (rtw89_pci_disable_clkreq)
                return;

        ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
                                          PCIE_CLKDLY_HW_30US);
        if (ret)
                rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");

        if (enable)
                ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
                                                RTW89_PCIE_BIT_CLK);
        else
                ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
                                                RTW89_PCIE_BIT_CLK);
        if (ret)
                rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d\n",
                          enable ? "set" : "unset", ret);
}

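/* Program the L0s/L1 entrance latencies and toggle the HW ASPM L1
 * function, unless the disable_aspm_l1 module parameter is set.
 */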
static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
{
        u8 value = 0;
        int ret;

        if (rtw89_pci_disable_aspm_l1)
                return;

        ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
        if (ret)
                rtw89_err(rtwdev, "failed to read ASPM Delay\n");

        value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
        value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
                 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);

        ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
        if (ret)
                rtw89_err(rtwdev, "failed to write ASPM Delay\n");

        if (enable)
                ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
                                                RTW89_PCIE_BIT_L1);
        else
                ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
                                                RTW89_PCIE_BIT_L1);
        if (ret)
                rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d\n",
                          enable ? "set" : "unset", ret);
}

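/* Recalculate RX interrupt mitigation: when traffic is high and no scan
 * is in progress, coalesce RX interrupts on a half-full RX BD ring or a
 * 2048us (32 * 64us) timer; otherwise mitigation is disabled.
 */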
static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
{
        struct rtw89_traffic_stats *stats = &rtwdev->stats;
        enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
        enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
        u32 val = 0;

        if (!rtwdev->scanning &&
            (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH))
                val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
                      FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
                      FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
                      FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);

        rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val);
}

static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;
        u16 link_ctrl;
        int ret;

        /* Although the standard PCIE configuration space exposes the link
         * control register, by Realtek's design the driver must also check
         * whether the host supports CLKREQ/ASPM before enabling the HW
         * module.
         *
         * These features involve two associated HW modules: one accesses
         * the PCIE configuration space to follow the host settings, and
         * the other performs the actual CLKREQ/ASPM mechanisms. The latter
         * is disabled by default, because the host may not support the
         * feature, and wrong settings (e.g. CLKREQ# not bi-directional)
         * could lead to device loss if the HW misbehaves on the link.
         *
         * Hence the driver first checks that the PCIE configuration space
         * is synced and enabled, and only then turns on the module that
         * actually implements the mechanism.
         */
        ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
        if (ret) {
                rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
                return;
        }

        if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
                rtw89_pci_clkreq_set(rtwdev, true);

        if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
                rtw89_pci_aspm_set(rtwdev, true);
}

static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
        int ret;

        if (enable)
                ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
                                                RTW89_PCIE_BIT_L1SUB);
        else
                ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
                                                RTW89_PCIE_BIT_L1SUB);
        if (ret)
                rtw89_err(rtwdev, "failed to %s L1SS, ret=%d\n",
                          enable ? "set" : "unset", ret);
}

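/* Enable the HW L1 substates function only if the host already has some
 * L1SS mode enabled in the L1SS extended capability, and the
 * disable_aspm_l1ss module parameter is not set.
 */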
static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;
        u32 l1ss_cap_ptr, l1ss_ctrl;

        if (rtw89_pci_disable_l1ss)
                return;

        l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
        if (!l1ss_cap_ptr)
                return;

        pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);

        if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
                rtw89_pci_l1ss_set(rtwdev, true);
}

static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en)
{
        const struct rtw89_pci_info *info = rtwdev->pci_info;
        u32 val32;

        if (en == MAC_AX_FUNC_EN) {
                val32 = B_AX_STOP_PCIEIO;
                rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32);

                val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
                rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
        } else {
                val32 = B_AX_STOP_PCIEIO;
                rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32);

                val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
                rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
        }
}

static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
{
        int ret = 0;
        u32 sts;
        u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;

        ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
                                       10, 1000, false, rtwdev,
                                       R_AX_PCIE_DMA_BUSY1);
        if (ret) {
                rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
                          rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
                return -EINVAL;
        }
        return ret;
}

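/* Level-1 SER recovery, step 1: stop all PCIe DMA and wait for IO to go
 * idle. If it stays busy, pulse the stuck TX/RX DMA functions in
 * R_AX_HCI_FUNC_EN and poll once more.
 */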
static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
{
        u32 val, dma_rst = 0;
        int ret;

        rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS);
        ret = rtw89_pci_poll_io_idle(rtwdev);
        if (ret) {
                val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
                rtw89_debug(rtwdev, RTW89_DBG_HCI,
                            "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
                            R_AX_DBG_ERR_FLAG, val);
                if ((val & B_AX_TX_STUCK) || (val & B_AX_PCIE_TXBD_LEN0))
                        dma_rst |= B_AX_HCI_TXDMA_EN;
                if (val & B_AX_RX_STUCK)
                        dma_rst |= B_AX_HCI_RXDMA_EN;
                val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN);
                rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst);
                rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst);
                ret = rtw89_pci_poll_io_idle(rtwdev);
                val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
                rtw89_debug(rtwdev, RTW89_DBG_HCI,
                            "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
                            R_AX_DBG_ERR_FLAG, val);
        }

        return ret;
}

static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en)
{
        u32 val32;

        if (en == MAC_AX_FUNC_EN) {
                val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
                rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32);
        } else {
                val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
                rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32);
        }
}

static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
{
        int ret = 0;
        u32 val32, sts;

        val32 = B_AX_RST_BDRAM;
        rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);

        ret = read_poll_timeout_atomic(rtw89_read32, sts,
                                       (sts & B_AX_RST_BDRAM) == 0x0, 1, 100,
                                       true, rtwdev, R_AX_PCIE_INIT_CFG1);
        return ret;
}

static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
{
        int ret;

        rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS);
        rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN);
        rtw89_pci_clr_idx_all(rtwdev);

        ret = rtw89_pci_rst_bdram(rtwdev);
        if (ret)
                return ret;

        rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN);
        return ret;
}

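/* HCI hook for level-1 SER recovery: stop DMA in step 1; restart it,
 * with the BD indexes cleared and the BD RAM reset, in step 2.
 */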
static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
                                          enum rtw89_lv1_rcvy_step step)
{
        int ret;

        switch (step) {
        case RTW89_LV1_RCVY_STEP_1:
                ret = rtw89_pci_lv1rst_stop_dma(rtwdev);
                if (ret)
                        rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");

                break;

        case RTW89_LV1_RCVY_STEP_2:
                ret = rtw89_pci_lv1rst_start_dma(rtwdev);
                if (ret)
                        rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
                break;

        default:
                return -EINVAL;
        }

        return ret;
}

static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
{
        rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
                   rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
        rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
                   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
        rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
                   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
}

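/* NAPI poll: drain the RPQ (TX release reports) first, then the RX
 * queues. Interrupts are re-enabled only when NAPI completes with
 * budget to spare and the device is still running.
 */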
static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
{
        struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        unsigned long flags;
        int work_done;

        rtwdev->napi_budget_countdown = budget;

        rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT);
        work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
        if (work_done == budget)
                return budget;

        rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT);
        work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
        if (work_done < budget && napi_complete_done(napi, work_done)) {
                spin_lock_irqsave(&rtwpci->irq_lock, flags);
                if (likely(rtwpci->running))
                        rtw89_chip_enable_intr(rtwdev, rtwpci);
                spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
        }

        return work_done;
}

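/* System PM callbacks. The register writes below keep PCIe PERST and
 * link training state across suspend; the exact semantics of these
 * vendor registers are not documented here, so rtw89_pci_resume() is
 * simply the mirror of this sequence.
 */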
static int __maybe_unused rtw89_pci_suspend(struct device *dev)
{
        struct ieee80211_hw *hw = dev_get_drvdata(dev);
        struct rtw89_dev *rtwdev = hw->priv;

        rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
                          B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
        rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
        rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
        rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
        rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
                          B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);

        return 0;
}

static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
{
        if (rtwdev->chip->chip_id == RTL8852C)
                return;

        /* Hardware needs the register written twice for the setting to
         * take effect.
         */
        rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
                                    RTW89_PCIE_BIT_CFG_RST_MSTATE);
        rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
                                    RTW89_PCIE_BIT_CFG_RST_MSTATE);
}

static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
        struct ieee80211_hw *hw = dev_get_drvdata(dev);
        struct rtw89_dev *rtwdev = hw->priv;

        rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
                          B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
        rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
        rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
        rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
        rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
                          B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
        rtw89_pci_l2_hci_ldo(rtwdev);
        rtw89_pci_link_cfg(rtwdev);
        rtw89_pci_l1ss_cfg(rtwdev);

        return 0;
}

SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
EXPORT_SYMBOL(rtw89_pm_ops);

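/* HCI bus operations the core uses to drive the device over PCIe; this
 * table is what keeps the core bus-agnostic.
 */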
static const struct rtw89_hci_ops rtw89_pci_ops = {
        .tx_write       = rtw89_pci_ops_tx_write,
        .tx_kick_off    = rtw89_pci_ops_tx_kick_off,
        .flush_queues   = rtw89_pci_ops_flush_queues,
        .reset          = rtw89_pci_ops_reset,
        .start          = rtw89_pci_ops_start,
        .stop           = rtw89_pci_ops_stop,
        .pause          = rtw89_pci_ops_pause,
        .switch_mode    = rtw89_pci_ops_switch_mode,
        .recalc_int_mit = rtw89_pci_recalc_int_mit,

        .read8          = rtw89_pci_ops_read8,
        .read16         = rtw89_pci_ops_read16,
        .read32         = rtw89_pci_ops_read32,
        .write8         = rtw89_pci_ops_write8,
        .write16        = rtw89_pci_ops_write16,
        .write32        = rtw89_pci_ops_write32,

        .mac_pre_init   = rtw89_pci_ops_mac_pre_init,
        .mac_post_init  = rtw89_pci_ops_mac_post_init,
        .deinit         = rtw89_pci_ops_deinit,

        .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
        .mac_lv1_rcvy   = rtw89_pci_ops_mac_lv1_recovery,
        .dump_err_status = rtw89_pci_ops_dump_err_status,
        .napi_poll      = rtw89_pci_napi_poll,

        .recovery_start = rtw89_pci_ops_recovery_start,
        .recovery_complete = rtw89_pci_ops_recovery_complete,
};

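/* Probe entry point shared by the chip-specific PCIe modules. As an
 * illustrative sketch (the names below are assumptions, not defined in
 * this file), a chip module typically binds it like so:
 *
 *	static const struct pci_device_id rtw89_8852ae_id_table[] = {
 *		{
 *			PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
 *			.driver_data = (kernel_ulong_t)&rtw89_8852a_info,
 *		},
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(pci, rtw89_8852ae_id_table);
 *
 *	static struct pci_driver rtw89_8852ae_driver = {
 *		.name = "rtw89_8852ae",
 *		.id_table = rtw89_8852ae_id_table,
 *		.probe = rtw89_pci_probe,
 *		.remove = rtw89_pci_remove,
 *		.driver.pm = &rtw89_pm_ops,
 *	};
 *	module_pci_driver(rtw89_8852ae_driver);
 *
 * Whatever id->driver_data points at must be a struct rtw89_driver_info
 * describing the chip and its PCI parameters.
 */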
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ieee80211_hw *hw;
        struct rtw89_dev *rtwdev;
        const struct rtw89_driver_info *info;
        const struct rtw89_pci_info *pci_info;
        int driver_data_size;
        int ret;

        driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci);
        hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops);
        if (!hw) {
                dev_err(&pdev->dev, "failed to allocate hw\n");
                return -ENOMEM;
        }

        info = (const struct rtw89_driver_info *)id->driver_data;
        pci_info = info->bus.pci;

        rtwdev = hw->priv;
        rtwdev->hw = hw;
        rtwdev->dev = &pdev->dev;
        rtwdev->chip = info->chip;
        rtwdev->pci_info = info->bus.pci;
        rtwdev->hci.ops = &rtw89_pci_ops;
        rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
        rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
        rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;

        SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

        ret = rtw89_core_init(rtwdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to initialise core\n");
                goto err_release_hw;
        }

        ret = rtw89_pci_claim_device(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to claim pci device\n");
                goto err_core_deinit;
        }

        ret = rtw89_pci_setup_resource(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to setup pci resource\n");
                goto err_declaim_pci;
        }

        ret = rtw89_chip_info_setup(rtwdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to setup chip information\n");
                goto err_clear_resource;
        }

        rtw89_pci_link_cfg(rtwdev);
        rtw89_pci_l1ss_cfg(rtwdev);

        ret = rtw89_core_register(rtwdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to register core\n");
                goto err_clear_resource;
        }

        rtw89_core_napi_init(rtwdev);

        ret = rtw89_pci_request_irq(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to request pci irq\n");
                goto err_unregister;
        }

        return 0;

err_unregister:
        rtw89_core_napi_deinit(rtwdev);
        rtw89_core_unregister(rtwdev);
err_clear_resource:
        rtw89_pci_clear_resource(rtwdev, pdev);
err_declaim_pci:
        rtw89_pci_declaim_device(rtwdev, pdev);
err_core_deinit:
        rtw89_core_deinit(rtwdev);
err_release_hw:
        ieee80211_free_hw(hw);

        return ret;
}
EXPORT_SYMBOL(rtw89_pci_probe);

void rtw89_pci_remove(struct pci_dev *pdev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct rtw89_dev *rtwdev;

        rtwdev = hw->priv;

        rtw89_pci_free_irq(rtwdev, pdev);
        rtw89_core_napi_deinit(rtwdev);
        rtw89_core_unregister(rtwdev);
        rtw89_pci_clear_resource(rtwdev, pdev);
        rtw89_pci_declaim_device(rtwdev, pdev);
        rtw89_core_deinit(rtwdev);
        ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw89_pci_remove);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");