linux/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Quantenna Communications */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>
#include <linux/log2.h>

#include "pcie_priv.h"
#include "pearl_pcie_regs.h"
#include "pearl_pcie_ipc.h"
#include "qtn_hw_ids.h"
#include "core.h"
#include "bus.h"
#include "shm_ipc.h"
#include "debug.h"

#define PEARL_TX_BD_SIZE_DEFAULT        32
#define PEARL_RX_BD_SIZE_DEFAULT        256

struct qtnf_pearl_bda {
        __le16 bda_len;
        __le16 bda_version;
        __le32 bda_pci_endian;
        __le32 bda_ep_state;
        __le32 bda_rc_state;
        __le32 bda_dma_mask;
        __le32 bda_msi_addr;
        __le32 bda_flashsz;
        u8 bda_boardname[PCIE_BDA_NAMELEN];
        __le32 bda_rc_msi_enabled;
        u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
        __le32 bda_dsbw_start_index;
        __le32 bda_dsbw_end_index;
        __le32 bda_dsbw_total_bytes;
        __le32 bda_rc_tx_bd_base;
        __le32 bda_rc_tx_bd_num;
        u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
        struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
        struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
} __packed;

struct qtnf_pearl_tx_bd {
        __le32 addr;
        __le32 addr_h;
        __le32 info;
        __le32 info_h;
} __packed;

struct qtnf_pearl_rx_bd {
        __le32 addr;
        __le32 addr_h;
        __le32 info;
        __le32 info_h;
        __le32 next_ptr;
        __le32 next_ptr_h;
} __packed;

struct qtnf_pearl_fw_hdr {
        u8 boardflg[8];
        __le32 fwsize;
        __le32 seqnum;
        __le32 type;
        __le32 pktlen;
        __le32 crc;
} __packed;

struct qtnf_pcie_pearl_state {
        struct qtnf_pcie_bus_priv base;

        /* lock for irq configuration changes */
        spinlock_t irq_lock;

        struct qtnf_pearl_bda __iomem *bda;
        void __iomem *pcie_reg_base;

        struct qtnf_pearl_tx_bd *tx_bd_vbase;
        dma_addr_t tx_bd_pbase;

        struct qtnf_pearl_rx_bd *rx_bd_vbase;
        dma_addr_t rx_bd_pbase;

        dma_addr_t bd_table_paddr;
        void *bd_table_vaddr;
        u32 bd_table_len;
        u32 pcie_irq_mask;
        u32 pcie_irq_rx_count;
        u32 pcie_irq_tx_count;
        u32 pcie_irq_uf_count;
};

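/*
 * HDP interrupt management: the set of enabled sources is tracked in
 * ps->pcie_irq_mask under irq_lock and mirrored to the PCIE_HDP_INT_EN
 * register, so that RX-done and TX-done interrupts can be masked and
 * re-armed independently of each other.
 */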
static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
        unsigned long flags;

        spin_lock_irqsave(&ps->irq_lock, flags);
        ps->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
        spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
        unsigned long flags;

        spin_lock_irqsave(&ps->irq_lock, flags);
        writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
        spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
        unsigned long flags;

        spin_lock_irqsave(&ps->irq_lock, flags);
        writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base));
        spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
{
        unsigned long flags;

        spin_lock_irqsave(&ps->irq_lock, flags);
        ps->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
        writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
        spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
{
        unsigned long flags;

        spin_lock_irqsave(&ps->irq_lock, flags);
        ps->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
        writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
        spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_en_txdone_irq(struct qtnf_pcie_pearl_state *ps)
{
        unsigned long flags;

        spin_lock_irqsave(&ps->irq_lock, flags);
        ps->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
        writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
        spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_pearl_state *ps)
{
        unsigned long flags;

        spin_lock_irqsave(&ps->irq_lock, flags);
        ps->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
        writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
        spin_unlock_irqrestore(&ps->irq_lock, flags);
}

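/*
 * De-assert the legacy INTx line; used on the interrupt path when MSI
 * is not available (see qtnf_pcie_pearl_interrupt()).
 */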
static void qtnf_deassert_intx(struct qtnf_pcie_pearl_state *ps)
{
        void __iomem *reg = ps->base.sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
        u32 cfg;

        cfg = readl(reg);
        cfg &= ~PEARL_ASSERT_INTX;
        qtnf_non_posted_write(cfg, reg);
}

static void qtnf_pearl_reset_ep(struct qtnf_pcie_pearl_state *ps)
{
        const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
        void __iomem *reg = ps->base.sysctl_bar +
                            QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

        qtnf_non_posted_write(data, reg);
        msleep(QTN_EP_RESET_WAIT_MS);
        pci_restore_state(ps->base.pdev);
}

static void qtnf_pcie_pearl_ipc_gen_ep_int(void *arg)
{
        const struct qtnf_pcie_pearl_state *ps = arg;
        const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
        void __iomem *reg = ps->base.sysctl_bar +
                            QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

        qtnf_non_posted_write(data, reg);
}

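/*
 * EP/RC handshake helpers: these state words live in the board data area
 * (BDA) shared with the endpoint. qtnf_poll_state() busy-waits in ~1 ms
 * steps for up to delay_in_ms milliseconds for the given bits to be set.
 */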
static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
        u32 s = readl(reg);

        return s & state;
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
        u32 s = readl(reg);

        qtnf_non_posted_write(state | s, reg);
}

static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
{
        u32 s = readl(reg);

        qtnf_non_posted_write(s & ~state, reg);
}

static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
        u32 timeout = 0;

        while (!qtnf_is_state(reg, state)) {
                usleep_range(1000, 1200);
                if (++timeout > delay_in_ms)
                        return -1;
        }

        return 0;
}

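/*
 * Allocate one coherent DMA region holding the TX descriptor ring followed
 * by the RX descriptor ring, and program the RX ring base and size into
 * the HDP. Note the direction naming: the host RX ring is written through
 * the PCIE_HDP_TX_HOST_Q_* registers, i.e. TX as seen by the endpoint.
 */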
static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
{
        struct qtnf_pcie_bus_priv *priv = &ps->base;
        dma_addr_t paddr;
        void *vaddr;
        int len;

        len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
                priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);

        vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        /* tx bd */

        ps->bd_table_vaddr = vaddr;
        ps->bd_table_paddr = paddr;
        ps->bd_table_len = len;

        ps->tx_bd_vbase = vaddr;
        ps->tx_bd_pbase = paddr;

        pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

        priv->tx_bd_r_index = 0;
        priv->tx_bd_w_index = 0;

        /* rx bd */

        vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
        paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);

        ps->rx_bd_vbase = vaddr;
        ps->rx_bd_pbase = paddr;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        writel(QTN_HOST_HI32(paddr),
               PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
#endif
        writel(QTN_HOST_LO32(paddr),
               PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
        writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
               PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));

        pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

        return 0;
}

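/*
 * Attach a fresh rx skb to descriptor @index: allocate and DMA-map the
 * buffer, then publish its bus address to the endpoint via the HHBM
 * buffer pointer and the host queue write pointer.
 */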
static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
{
        struct qtnf_pcie_bus_priv *priv = &ps->base;
        struct qtnf_pearl_rx_bd *rxbd;
        struct sk_buff *skb;
        dma_addr_t paddr;

        skb = netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE);
        if (!skb) {
                priv->rx_skb[index] = NULL;
                return -ENOMEM;
        }

        priv->rx_skb[index] = skb;
        rxbd = &ps->rx_bd_vbase[index];

        paddr = dma_map_single(&priv->pdev->dev, skb->data, SKB_BUF_SIZE,
                               DMA_FROM_DEVICE);
        if (dma_mapping_error(&priv->pdev->dev, paddr)) {
                pr_err("skb DMA mapping error: %pad\n", &paddr);
                return -ENOMEM;
        }

        /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
        rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
        rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
        rxbd->info = 0x0;

        priv->rx_bd_w_index = index;

        /* sync up all descriptor updates */
        wmb();

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        writel(QTN_HOST_HI32(paddr),
               PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
#endif
        writel(QTN_HOST_LO32(paddr),
               PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));

        writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
        return 0;
}

static int pearl_alloc_rx_buffers(struct qtnf_pcie_pearl_state *ps)
{
        u16 i;
        int ret = 0;

        memset(ps->rx_bd_vbase, 0x0,
               ps->base.rx_bd_num * sizeof(struct qtnf_pearl_rx_bd));

        for (i = 0; i < ps->base.rx_bd_num; i++) {
                ret = pearl_skb2rbd_attach(ps, i);
                if (ret)
                        break;
        }

        return ret;
}

/* all rx/tx activity should have ceased before calling this function */
static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps)
{
        struct qtnf_pcie_bus_priv *priv = &ps->base;
        struct qtnf_pearl_tx_bd *txbd;
        struct qtnf_pearl_rx_bd *rxbd;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int i;

        /* free rx buffers */
        for (i = 0; i < priv->rx_bd_num; i++) {
                if (priv->rx_skb && priv->rx_skb[i]) {
                        rxbd = &ps->rx_bd_vbase[i];
                        skb = priv->rx_skb[i];
                        paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
                                              le32_to_cpu(rxbd->addr));
                        dma_unmap_single(&priv->pdev->dev, paddr,
                                         SKB_BUF_SIZE, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        priv->rx_skb[i] = NULL;
                }
        }

        /* free tx buffers */
        for (i = 0; i < priv->tx_bd_num; i++) {
                if (priv->tx_skb && priv->tx_skb[i]) {
                        txbd = &ps->tx_bd_vbase[i];
                        skb = priv->tx_skb[i];
                        paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
                                              le32_to_cpu(txbd->addr));
                        dma_unmap_single(&priv->pdev->dev, paddr, skb->len,
                                         DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                        priv->tx_skb[i] = NULL;
                }
        }
}

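/*
 * Bring the HHBM block to a known state: soft reset, 64-bit addressing
 * where the platform supports it, and a queue limit matching the RX ring.
 */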
static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
{
        u32 val;

        val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
        val |= HHBM_CONFIG_SOFT_RESET;
        writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
        usleep_range(50, 100);
        val &= ~HHBM_CONFIG_SOFT_RESET;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        val |= HHBM_64BIT;
#endif
        writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
        writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));

        return 0;
}

static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
                                     unsigned int tx_bd_size,
                                     unsigned int rx_bd_size)
{
        struct qtnf_pcie_bus_priv *priv = &ps->base;
        int ret;
        u32 val;

        if (tx_bd_size == 0)
                tx_bd_size = PEARL_TX_BD_SIZE_DEFAULT;

        val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);

        if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
                pr_warn("invalid tx_bd_size value %u, use default %u\n",
                        tx_bd_size, PEARL_TX_BD_SIZE_DEFAULT);
                priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
        } else {
                priv->tx_bd_num = tx_bd_size;
        }

        if (rx_bd_size == 0)
                rx_bd_size = PEARL_RX_BD_SIZE_DEFAULT;

        val = rx_bd_size * sizeof(dma_addr_t);

        if (!is_power_of_2(rx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
                pr_warn("invalid rx_bd_size value %u, use default %u\n",
                        rx_bd_size, PEARL_RX_BD_SIZE_DEFAULT);
                priv->rx_bd_num = PEARL_RX_BD_SIZE_DEFAULT;
        } else {
                priv->rx_bd_num = rx_bd_size;
        }

        priv->rx_bd_w_index = 0;
        priv->rx_bd_r_index = 0;

        ret = pearl_hhbm_init(ps);
        if (ret) {
                pr_err("failed to init h/w queues\n");
                return ret;
        }

        ret = qtnf_pcie_alloc_skb_array(priv);
        if (ret) {
                pr_err("failed to allocate skb array\n");
                return ret;
        }

        ret = pearl_alloc_bd_table(ps);
        if (ret) {
                pr_err("failed to allocate bd table\n");
                return ret;
        }

        ret = pearl_alloc_rx_buffers(ps);
        if (ret) {
                pr_err("failed to allocate rx buffers\n");
                return ret;
        }

        return ret;
}

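/*
 * Reclaim completed TX descriptors: the hardware consumer index is read
 * from PCIE_HDP_RX0DMA_CNT and masked to the power-of-2 ring size; each
 * completed buffer is unmapped, accounted and freed. Runs under
 * tx_reclaim_lock since it is called from both xmit and tasklet context.
 */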
static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
{
        struct qtnf_pcie_bus_priv *priv = &ps->base;
        struct qtnf_pearl_tx_bd *txbd;
        struct sk_buff *skb;
        unsigned long flags;
        dma_addr_t paddr;
        u32 tx_done_index;
        int count = 0;
        int i;

        spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

        tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
                        & (priv->tx_bd_num - 1);

        i = priv->tx_bd_r_index;

        while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
                skb = priv->tx_skb[i];
                if (likely(skb)) {
                        txbd = &ps->tx_bd_vbase[i];
                        paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
                                              le32_to_cpu(txbd->addr));
                        dma_unmap_single(&priv->pdev->dev, paddr, skb->len,
                                         DMA_TO_DEVICE);

                        if (skb->dev) {
                                dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
                                if (unlikely(priv->tx_stopped)) {
                                        qtnf_wake_all_queues(skb->dev);
                                        priv->tx_stopped = 0;
                                }
                        }

                        dev_kfree_skb_any(skb);
                }

                priv->tx_skb[i] = NULL;
                count++;

                if (++i >= priv->tx_bd_num)
                        i = 0;
        }

        priv->tx_reclaim_done += count;
        priv->tx_reclaim_req++;
        priv->tx_bd_r_index = i;

        spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}

static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
{
        struct qtnf_pcie_bus_priv *priv = &ps->base;

        if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
                        priv->tx_bd_num)) {
                qtnf_pearl_data_tx_reclaim(ps);

                if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
                                priv->tx_bd_num)) {
                        pr_warn_ratelimited("reclaim full Tx queue\n");
                        priv->tx_full_count++;
                        return 0;
                }
        }

        return 1;
}

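/*
 * Queue a single skb for transmission: map the buffer, fill the next TX
 * descriptor and pass the descriptor's bus address to the endpoint via
 * the HDP host write-descriptor registers. Returns NETDEV_TX_BUSY (and
 * stops the netdev queues) if the ring is full; a DMA mapping failure
 * drops the packet but still reports NETDEV_TX_OK.
 */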
static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb)
{
        struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
        struct qtnf_pcie_bus_priv *priv = &ps->base;
        dma_addr_t txbd_paddr, skb_paddr;
        struct qtnf_pearl_tx_bd *txbd;
        unsigned long flags;
        int len, i;
        u32 info;
        int ret = 0;

        spin_lock_irqsave(&priv->tx_lock, flags);

        if (!qtnf_tx_queue_ready(ps)) {
                if (skb->dev) {
                        netif_tx_stop_all_queues(skb->dev);
                        priv->tx_stopped = 1;
                }

                spin_unlock_irqrestore(&priv->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        i = priv->tx_bd_w_index;
        priv->tx_skb[i] = skb;
        len = skb->len;

        skb_paddr = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                                   DMA_TO_DEVICE);
        if (dma_mapping_error(&priv->pdev->dev, skb_paddr)) {
                pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
                ret = -ENOMEM;
                goto tx_done;
        }

        txbd = &ps->tx_bd_vbase[i];
        txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
        txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));

        info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
        txbd->info = cpu_to_le32(info);

        /* sync up all descriptor updates before passing them to EP */
        dma_wmb();

        /* write new TX descriptor to PCIE_RX_FIFO on EP */
        txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        writel(QTN_HOST_HI32(txbd_paddr),
               PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
#endif
        writel(QTN_HOST_LO32(txbd_paddr),
               PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));

        if (++i >= priv->tx_bd_num)
                i = 0;

        priv->tx_bd_w_index = i;

tx_done:
        if (ret) {
                pr_err_ratelimited("drop skb\n");
                if (skb->dev)
                        skb->dev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
        }

        priv->tx_done_count++;
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        qtnf_pearl_data_tx_reclaim(ps);

        return NETDEV_TX_OK;
}

static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
                             unsigned int macid, unsigned int vifid)
{
        return qtnf_pcie_skb_send(bus, skb);
}

static int qtnf_pcie_data_tx_meta(struct qtnf_bus *bus, struct sk_buff *skb,
                                  unsigned int macid, unsigned int vifid)
{
        struct qtnf_frame_meta_info *meta;
        int tail_need = sizeof(*meta) - skb_tailroom(skb);
        int ret;

        if (tail_need > 0 && pskb_expand_head(skb, 0, tail_need, GFP_ATOMIC)) {
                skb->dev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        meta = skb_put(skb, sizeof(*meta));
        meta->magic_s = HBM_FRAME_META_MAGIC_PATTERN_S;
        meta->magic_e = HBM_FRAME_META_MAGIC_PATTERN_E;
        meta->macid = macid;
        meta->ifidx = vifid;

        ret = qtnf_pcie_skb_send(bus, skb);
        if (unlikely(ret == NETDEV_TX_BUSY))
                __skb_trim(skb, skb->len - sizeof(*meta));

        return ret;
}

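/*
 * Interrupt handler shared by the data path and SHM IPC: read the HDP
 * status once, kick NAPI for RX-done and the reclaim tasklet for TX-done
 * (masking the corresponding source until it is re-armed), then clear
 * all status bits as a hardware workaround.
 */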
static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
{
        struct qtnf_bus *bus = (struct qtnf_bus *)data;
        struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
        struct qtnf_pcie_bus_priv *priv = &ps->base;
        u32 status;

        priv->pcie_irq_count++;
        status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

        qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
        qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

        if (!(status & ps->pcie_irq_mask))
                goto irq_done;

        if (status & PCIE_HDP_INT_RX_BITS)
                ps->pcie_irq_rx_count++;

        if (status & PCIE_HDP_INT_TX_BITS)
                ps->pcie_irq_tx_count++;

        if (status & PCIE_HDP_INT_HHBM_UF)
                ps->pcie_irq_uf_count++;

        if (status & PCIE_HDP_INT_RX_BITS) {
                qtnf_dis_rxdone_irq(ps);
                napi_schedule(&bus->mux_napi);
        }

        if (status & PCIE_HDP_INT_TX_BITS) {
                qtnf_dis_txdone_irq(ps);
                tasklet_hi_schedule(&priv->reclaim_tq);
        }

irq_done:
        /* H/W workaround: clean all bits, not only enabled */
        qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

        if (!priv->msi_enabled)
                qtnf_deassert_intx(ps);

        return IRQ_HANDLED;
}

static int qtnf_rx_data_ready(struct qtnf_pcie_pearl_state *ps)
{
        u16 index = ps->base.rx_bd_r_index;
        struct qtnf_pearl_rx_bd *rxbd;
        u32 descw;

        rxbd = &ps->rx_bd_vbase[index];
        descw = le32_to_cpu(rxbd->info);

        if (descw & QTN_TXDONE_MASK)
                return 1;

        return 0;
}

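/*
 * NAPI poll: consume up to @budget completed RX descriptors, hand the
 * payloads to qtnf_classify_skb()/GRO, and refill the ring with fresh
 * buffers. RX-done interrupts are re-armed when the ring is drained
 * before the budget is exhausted.
 */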
static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
{
        struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
        struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
        struct qtnf_pcie_bus_priv *priv = &ps->base;
        struct net_device *ndev = NULL;
        struct sk_buff *skb = NULL;
        int processed = 0;
        struct qtnf_pearl_rx_bd *rxbd;
        dma_addr_t skb_paddr;
        int consume;
        u32 descw;
        u32 psize;
        u16 r_idx;
        u16 w_idx;
        int ret;

        while (processed < budget) {
                if (!qtnf_rx_data_ready(ps))
                        goto rx_out;

                r_idx = priv->rx_bd_r_index;
                rxbd = &ps->rx_bd_vbase[r_idx];
                descw = le32_to_cpu(rxbd->info);

                skb = priv->rx_skb[r_idx];
                psize = QTN_GET_LEN(descw);
                consume = 1;

                if (!(descw & QTN_TXDONE_MASK)) {
                        pr_warn("skip invalid rxbd[%d]\n", r_idx);
                        consume = 0;
                }

                if (!skb) {
                        pr_warn("skip missing rx_skb[%d]\n", r_idx);
                        consume = 0;
                }

                if (skb && (skb_tailroom(skb) < psize)) {
                        pr_err("skip packet with invalid length: %u > %u\n",
                               psize, skb_tailroom(skb));
                        consume = 0;
                }

                if (skb) {
                        skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
                                                  le32_to_cpu(rxbd->addr));
                        dma_unmap_single(&priv->pdev->dev, skb_paddr,
                                         SKB_BUF_SIZE, DMA_FROM_DEVICE);
                }

                if (consume) {
                        skb_put(skb, psize);
                        ndev = qtnf_classify_skb(bus, skb);
                        if (likely(ndev)) {
                                dev_sw_netstats_rx_add(ndev, skb->len);
                                skb->protocol = eth_type_trans(skb, ndev);
                                napi_gro_receive(napi, skb);
                        } else {
                                pr_debug("drop untagged skb\n");
                                bus->mux_dev.stats.rx_dropped++;
                                dev_kfree_skb_any(skb);
                        }
                } else {
                        if (skb) {
                                bus->mux_dev.stats.rx_dropped++;
                                dev_kfree_skb_any(skb);
                        }
                }

                priv->rx_skb[r_idx] = NULL;
                if (++r_idx >= priv->rx_bd_num)
                        r_idx = 0;

                priv->rx_bd_r_index = r_idx;

                /* replace processed buffers with new ones */
                w_idx = priv->rx_bd_w_index;
                while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
                                  priv->rx_bd_num) > 0) {
                        if (++w_idx >= priv->rx_bd_num)
                                w_idx = 0;

                        ret = pearl_skb2rbd_attach(ps, w_idx);
                        if (ret) {
                                pr_err("failed to allocate new rx_skb[%d]\n",
                                       w_idx);
                                break;
                        }
                }

                processed++;
        }

rx_out:
        if (processed < budget) {
                napi_complete(napi);
                qtnf_en_rxdone_irq(ps);
        }

        return processed;
}

static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
        struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

        tasklet_hi_schedule(&ps->base.reclaim_tq);
}

static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
        struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

        qtnf_enable_hdp_irqs(ps);
        napi_enable(&bus->mux_napi);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
        struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

        napi_disable(&bus->mux_napi);
        qtnf_disable_hdp_irqs(ps);
}

static void qtnf_pearl_tx_use_meta_info_set(struct qtnf_bus *bus, bool use_meta)
{
        if (use_meta)
                bus->bus_ops->data_tx = qtnf_pcie_data_tx_meta;
        else
                bus->bus_ops->data_tx = qtnf_pcie_data_tx;
}

static struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
        /* control path methods */
        .control_tx     = qtnf_pcie_control_tx,

        /* data path methods */
        .data_tx                = qtnf_pcie_data_tx,
        .data_tx_timeout        = qtnf_pcie_data_tx_timeout,
        .data_tx_use_meta_set   = qtnf_pearl_tx_use_meta_info_set,
        .data_rx_start          = qtnf_pcie_data_rx_start,
        .data_rx_stop           = qtnf_pcie_data_rx_stop,
};

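/* debugfs: dump interrupt counters and per-source HDP enable state. */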
static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
        struct qtnf_bus *bus = dev_get_drvdata(s->private);
        struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
        u32 reg = readl(PCIE_HDP_INT_EN(ps->pcie_reg_base));
        u32 status;

        seq_printf(s, "pcie_irq_count(%u)\n", ps->base.pcie_irq_count);
        seq_printf(s, "pcie_irq_tx_count(%u)\n", ps->pcie_irq_tx_count);
        status = reg & PCIE_HDP_INT_TX_BITS;
        seq_printf(s, "pcie_irq_tx_status(%s)\n",
                   (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
        seq_printf(s, "pcie_irq_rx_count(%u)\n", ps->pcie_irq_rx_count);
        status = reg & PCIE_HDP_INT_RX_BITS;
        seq_printf(s, "pcie_irq_rx_status(%s)\n",
                   (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
        seq_printf(s, "pcie_irq_uf_count(%u)\n", ps->pcie_irq_uf_count);
        status = reg & PCIE_HDP_INT_HHBM_UF;
        seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
                   (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");

        return 0;
}

static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
{
        struct qtnf_bus *bus = dev_get_drvdata(s->private);
        struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
        struct qtnf_pcie_bus_priv *priv = &ps->base;

        seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
        seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
        seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
        seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);

        seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
        seq_printf(s, "tx_bd_p_index(%u)\n",
                   readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
                        & (priv->tx_bd_num - 1));
        seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
        seq_printf(s, "tx queue len(%u)\n",
                   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
                            priv->tx_bd_num));

        seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
        seq_printf(s, "rx_bd_p_index(%u)\n",
                   readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
                        & (priv->rx_bd_num - 1));
        seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
        seq_printf(s, "rx alloc queue len(%u)\n",
                   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
                              priv->rx_bd_num));

        return 0;
}

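/*
 * Send one firmware chunk to the endpoint as a pseudo data frame: a
 * qtnf_pearl_fw_hdr (board flag, total image size, sequence number,
 * chunk type, payload length, CRC32) followed by the payload, pushed
 * through the regular TX ring. Returns the payload length on success,
 * 0 otherwise.
 */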
static int qtnf_ep_fw_send(struct pci_dev *pdev, uint32_t size,
                           int blk, const u8 *pblk, const u8 *fw)
{
        struct qtnf_bus *bus = pci_get_drvdata(pdev);
        struct qtnf_pearl_fw_hdr *hdr;
        u8 *pdata;
        int hds = sizeof(*hdr);
        struct sk_buff *skb = NULL;
        int len = 0;
        int ret;

        skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        skb->len = QTN_PCIE_FW_BUFSZ;
        skb->dev = NULL;

        hdr = (struct qtnf_pearl_fw_hdr *)skb->data;
        memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
        hdr->fwsize = cpu_to_le32(size);
        hdr->seqnum = cpu_to_le32(blk);

        if (blk)
                hdr->type = cpu_to_le32(QTN_FW_DSUB);
        else
                hdr->type = cpu_to_le32(QTN_FW_DBEGIN);

        pdata = skb->data + hds;

        len = QTN_PCIE_FW_BUFSZ - hds;
        if (pblk >= (fw + size - len)) {
                len = fw + size - pblk;
                hdr->type = cpu_to_le32(QTN_FW_DEND);
        }

        hdr->pktlen = cpu_to_le32(len);
        memcpy(pdata, pblk, len);
        hdr->crc = cpu_to_le32(~crc32(0, pdata, len));

        ret = qtnf_pcie_skb_send(bus, skb);

        return (ret == NETDEV_TX_OK) ? len : 0;
}

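/*
 * Firmware download loop: the image is cut into ring-buffer-sized chunks
 * and streamed with qtnf_ep_fw_send(). Each time the block counter wraps
 * QTN_PCIE_FW_DLMASK, and after the final block, the host raises
 * QTN_RC_FW_SYNC and waits for QTN_EP_FW_SYNC from the endpoint; if the
 * endpoint reports QTN_EP_FW_RETRY instead, the last window is rewound
 * and re-sent.
 */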
static int
qtnf_ep_fw_load(struct qtnf_pcie_pearl_state *ps, const u8 *fw, u32 fw_size)
{
        int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pearl_fw_hdr);
        int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
        const u8 *pblk = fw;
        int threshold = 0;
        int blk = 0;
        int len;

        pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);

        while (blk < blk_count) {
                if (++threshold > 10000) {
                        pr_err("FW upload failed: too many retries\n");
                        return -ETIMEDOUT;
                }

                len = qtnf_ep_fw_send(ps->base.pdev, fw_size, blk, pblk, fw);
                if (len <= 0)
                        continue;

                if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
                    (blk == (blk_count - 1))) {
                        qtnf_set_state(&ps->bda->bda_rc_state,
                                       QTN_RC_FW_SYNC);
                        if (qtnf_poll_state(&ps->bda->bda_ep_state,
                                            QTN_EP_FW_SYNC,
                                            QTN_FW_DL_TIMEOUT_MS)) {
                                pr_err("FW upload failed: SYNC timed out\n");
                                return -ETIMEDOUT;
                        }

                        qtnf_clear_state(&ps->bda->bda_ep_state,
                                         QTN_EP_FW_SYNC);

                        if (qtnf_is_state(&ps->bda->bda_ep_state,
                                          QTN_EP_FW_RETRY)) {
                                if (blk == (blk_count - 1)) {
                                        int last_round =
                                                blk_count & QTN_PCIE_FW_DLMASK;
                                        blk -= last_round;
                                        pblk -= ((last_round - 1) *
                                                blk_size + len);
                                } else {
                                        blk -= QTN_PCIE_FW_DLMASK;
                                        pblk -= QTN_PCIE_FW_DLMASK * blk_size;
                                }

                                qtnf_clear_state(&ps->bda->bda_ep_state,
                                                 QTN_EP_FW_RETRY);

                                pr_warn("FW upload retry: block #%d\n", blk);
                                continue;
                        }

                        qtnf_pearl_data_tx_reclaim(ps);
                }

                pblk += len;
                blk++;
        }

        pr_debug("FW upload completed: sent %d blocks in total\n", blk);
        return 0;
}

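/*
 * Deferred firmware bringup run from bus->fw_work: either tell the
 * endpoint to boot from flash or upload QTN_PCI_PEARL_FW_NAME, then
 * wait for the endpoint to report FW_DONE and QLINK_DONE before
 * finishing bus init and registering the debugfs entries.
 */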
static void qtnf_pearl_fw_work_handler(struct work_struct *work)
{
        struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
        struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
        u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
        const char *fwname = QTN_PCI_PEARL_FW_NAME;
        struct pci_dev *pdev = ps->base.pdev;
        const struct firmware *fw;
        int ret;

        if (ps->base.flashboot) {
                state |= QTN_RC_FW_FLASHBOOT;
        } else {
                ret = request_firmware(&fw, fwname, &pdev->dev);
                if (ret < 0) {
                        pr_err("failed to get firmware %s\n", fwname);
                        goto fw_load_exit;
                }
        }

        qtnf_set_state(&ps->bda->bda_rc_state, state);

        if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
                            QTN_FW_DL_TIMEOUT_MS)) {
                pr_err("card is not ready\n");

                if (!ps->base.flashboot)
                        release_firmware(fw);

                goto fw_load_exit;
        }

        qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY);

        if (ps->base.flashboot) {
                pr_info("booting firmware from flash\n");
        } else {
                pr_info("starting firmware upload: %s\n", fwname);

                ret = qtnf_ep_fw_load(ps, fw->data, fw->size);
                release_firmware(fw);
                if (ret) {
                        pr_err("firmware upload error\n");
                        goto fw_load_exit;
                }
        }

        if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_DONE,
                            QTN_FW_DL_TIMEOUT_MS)) {
                pr_err("firmware bringup timed out\n");
                goto fw_load_exit;
        }

        if (qtnf_poll_state(&ps->bda->bda_ep_state,
                            QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
                pr_err("firmware runtime failure\n");
                goto fw_load_exit;
        }

        pr_info("firmware is up and running\n");

        ret = qtnf_pcie_fw_boot_done(bus);
        if (ret)
                goto fw_load_exit;

        qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
        qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);

fw_load_exit:
        put_device(&pdev->dev);
}

static void qtnf_pearl_reclaim_tasklet_fn(struct tasklet_struct *t)
{
        struct qtnf_pcie_pearl_state *ps = from_tasklet(ps, t, base.reclaim_tq);

        qtnf_pearl_data_tx_reclaim(ps);
        qtnf_en_txdone_irq(ps);
}

static u64 qtnf_pearl_dma_mask_get(void)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        return DMA_BIT_MASK(64);
#else
        return DMA_BIT_MASK(32);
#endif
}

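/*
 * Pearl-specific part of the PCIe probe: wire up bus ops, map the BDA
 * and HDP register bases, size and allocate the descriptor rings,
 * request the PCIe interrupt (HDP sources stay masked until the RX path
 * is started), and set up the reclaim tasklet, NAPI and SHM IPC.
 */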
static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
                                 unsigned int rx_bd_size)
{
        struct qtnf_shm_ipc_int ipc_int;
        struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
        struct pci_dev *pdev = ps->base.pdev;
        int ret;

        bus->bus_ops = &qtnf_pcie_pearl_bus_ops;
        spin_lock_init(&ps->irq_lock);
        INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);

        ps->pcie_reg_base = ps->base.dmareg_bar;
        ps->bda = ps->base.epmem_bar;
        writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);

        ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size, rx_bd_size);
        if (ret) {
                pr_err("PCIE xfer init failed\n");
                return ret;
        }

        /* init default irq settings */
        qtnf_init_hdp_irqs(ps);

        /* start with disabled irqs */
        qtnf_disable_hdp_irqs(ps);

        ret = devm_request_irq(&pdev->dev, pdev->irq,
                               &qtnf_pcie_pearl_interrupt, 0,
                               "qtnf_pearl_irq", (void *)bus);
        if (ret) {
                pr_err("failed to request pcie irq %d\n", pdev->irq);
                qtnf_pearl_free_xfer_buffers(ps);
                return ret;
        }

        tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn);
        netif_napi_add(&bus->mux_dev, &bus->mux_napi,
                       qtnf_pcie_pearl_rx_poll, 10);

        ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
        ipc_int.arg = ps;
        qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
                               &ps->bda->bda_shm_reg2, &ipc_int);

        return 0;
}

static void qtnf_pcie_pearl_remove(struct qtnf_bus *bus)
{
        struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);

        qtnf_pearl_reset_ep(ps);
        qtnf_pearl_free_xfer_buffers(ps);
}

#ifdef CONFIG_PM_SLEEP
static int qtnf_pcie_pearl_suspend(struct qtnf_bus *bus)
{
        return -EOPNOTSUPP;
}

static int qtnf_pcie_pearl_resume(struct qtnf_bus *bus)
{
        return 0;
}
#endif

struct qtnf_bus *qtnf_pcie_pearl_alloc(struct pci_dev *pdev)
{
        struct qtnf_bus *bus;
        struct qtnf_pcie_pearl_state *ps;

        bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ps), GFP_KERNEL);
        if (!bus)
                return NULL;

        ps = get_bus_priv(bus);
        ps->base.probe_cb = qtnf_pcie_pearl_probe;
        ps->base.remove_cb = qtnf_pcie_pearl_remove;
        ps->base.dma_mask_get_cb = qtnf_pearl_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
        ps->base.resume_cb = qtnf_pcie_pearl_resume;
        ps->base.suspend_cb = qtnf_pcie_pearl_suspend;
#endif

        return bus;
}