/* linux/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c */
   1/*
   2 * Copyright (c) 2015-2016 Quantenna Communications, Inc.
   3 * All rights reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License
   7 * as published by the Free Software Foundation; either version 2
   8 * of the License, or (at your option) any later version.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 */
  16
  17#include <linux/kernel.h>
  18#include <linux/module.h>
  19#include <linux/firmware.h>
  20#include <linux/pci.h>
  21#include <linux/vmalloc.h>
  22#include <linux/delay.h>
  23#include <linux/interrupt.h>
  24#include <linux/sched.h>
  25#include <linux/completion.h>
  26#include <linux/crc32.h>
  27#include <linux/spinlock.h>
  28#include <linux/circ_buf.h>
  29#include <linux/log2.h>
  30
  31#include "qtn_hw_ids.h"
  32#include "pcie_bus_priv.h"
  33#include "core.h"
  34#include "bus.h"
  35#include "debug.h"
  36
/* Interrupt mode: set to false to force legacy INTx instead of MSI. */
static bool use_msi = true;
module_param(use_msi, bool, 0644);
MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");

/* Tx descriptor ring size; must be a power of two (validated at init). */
static unsigned int tx_bd_size_param = 32;
module_param(tx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");

/* Rx descriptor ring size; must be a power of two (validated at init). */
static unsigned int rx_bd_size_param = 256;
module_param(rx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");

/* Boot source: 1 = boot EP firmware from flash, 0 = download FW file from host. */
static u8 flashboot = 1;
module_param(flashboot, byte, 0644);
MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");

#define DRV_NAME        "qtnfmac_pearl_pcie"
  54
  55static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
  56{
  57        writel(val, basereg);
  58
  59        /* flush posted write */
  60        readl(basereg);
  61}
  62
  63static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
  64{
  65        unsigned long flags;
  66
  67        spin_lock_irqsave(&priv->irq_lock, flags);
  68        priv->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
  69        spin_unlock_irqrestore(&priv->irq_lock, flags);
  70}
  71
  72static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
  73{
  74        unsigned long flags;
  75
  76        spin_lock_irqsave(&priv->irq_lock, flags);
  77        writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
  78        spin_unlock_irqrestore(&priv->irq_lock, flags);
  79}
  80
  81static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
  82{
  83        unsigned long flags;
  84
  85        spin_lock_irqsave(&priv->irq_lock, flags);
  86        writel(0x0, PCIE_HDP_INT_EN(priv->pcie_reg_base));
  87        spin_unlock_irqrestore(&priv->irq_lock, flags);
  88}
  89
  90static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
  91{
  92        unsigned long flags;
  93
  94        spin_lock_irqsave(&priv->irq_lock, flags);
  95        priv->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
  96        writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
  97        spin_unlock_irqrestore(&priv->irq_lock, flags);
  98}
  99
 100static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
 101{
 102        unsigned long flags;
 103
 104        spin_lock_irqsave(&priv->irq_lock, flags);
 105        priv->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
 106        writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
 107        spin_unlock_irqrestore(&priv->irq_lock, flags);
 108}
 109
 110static inline void qtnf_en_txdone_irq(struct qtnf_pcie_bus_priv *priv)
 111{
 112        unsigned long flags;
 113
 114        spin_lock_irqsave(&priv->irq_lock, flags);
 115        priv->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
 116        writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
 117        spin_unlock_irqrestore(&priv->irq_lock, flags);
 118}
 119
 120static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_bus_priv *priv)
 121{
 122        unsigned long flags;
 123
 124        spin_lock_irqsave(&priv->irq_lock, flags);
 125        priv->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
 126        writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
 127        spin_unlock_irqrestore(&priv->irq_lock, flags);
 128}
 129
 130static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv)
 131{
 132        struct pci_dev *pdev = priv->pdev;
 133
 134        /* fall back to legacy INTx interrupts by default */
 135        priv->msi_enabled = 0;
 136
 137        /* check if MSI capability is available */
 138        if (use_msi) {
 139                if (!pci_enable_msi(pdev)) {
 140                        pr_debug("MSI interrupt enabled\n");
 141                        priv->msi_enabled = 1;
 142                } else {
 143                        pr_warn("failed to enable MSI interrupts");
 144                }
 145        }
 146
 147        if (!priv->msi_enabled) {
 148                pr_warn("legacy PCIE interrupts enabled\n");
 149                pci_intx(pdev, 1);
 150        }
 151}
 152
 153static void qtnf_deassert_intx(struct qtnf_pcie_bus_priv *priv)
 154{
 155        void __iomem *reg = priv->sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
 156        u32 cfg;
 157
 158        cfg = readl(reg);
 159        cfg &= ~PEARL_ASSERT_INTX;
 160        qtnf_non_posted_write(cfg, reg);
 161}
 162
 163static void qtnf_reset_card(struct qtnf_pcie_bus_priv *priv)
 164{
 165        const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
 166        void __iomem *reg = priv->sysctl_bar +
 167                            QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
 168
 169        qtnf_non_posted_write(data, reg);
 170        msleep(QTN_EP_RESET_WAIT_MS);
 171        pci_restore_state(priv->pdev);
 172}
 173
 174static void qtnf_ipc_gen_ep_int(void *arg)
 175{
 176        const struct qtnf_pcie_bus_priv *priv = arg;
 177        const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
 178        void __iomem *reg = priv->sysctl_bar +
 179                            QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
 180
 181        qtnf_non_posted_write(data, reg);
 182}
 183
 184static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
 185{
 186        void __iomem *vaddr;
 187        dma_addr_t busaddr;
 188        size_t len;
 189        int ret;
 190
 191        ret = pcim_iomap_regions(priv->pdev, 1 << index, DRV_NAME);
 192        if (ret)
 193                return IOMEM_ERR_PTR(ret);
 194
 195        busaddr = pci_resource_start(priv->pdev, index);
 196        len = pci_resource_len(priv->pdev, index);
 197        vaddr = pcim_iomap_table(priv->pdev)[index];
 198        if (!vaddr)
 199                return IOMEM_ERR_PTR(-ENOMEM);
 200
 201        pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n",
 202                 index, vaddr, &busaddr, (int)len);
 203
 204        return vaddr;
 205}
 206
 207static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len)
 208{
 209        struct qtnf_pcie_bus_priv *priv = arg;
 210        struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
 211        struct sk_buff *skb;
 212
 213        if (unlikely(len == 0)) {
 214                pr_warn("zero length packet received\n");
 215                return;
 216        }
 217
 218        skb = __dev_alloc_skb(len, GFP_KERNEL);
 219
 220        if (unlikely(!skb)) {
 221                pr_err("failed to allocate skb\n");
 222                return;
 223        }
 224
 225        skb_put_data(skb, buf, len);
 226
 227        qtnf_trans_handle_rx_ctl_packet(bus, skb);
 228}
 229
 230static int qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv)
 231{
 232        struct qtnf_shm_ipc_region __iomem *ipc_tx_reg;
 233        struct qtnf_shm_ipc_region __iomem *ipc_rx_reg;
 234        const struct qtnf_shm_ipc_int ipc_int = { qtnf_ipc_gen_ep_int, priv };
 235        const struct qtnf_shm_ipc_rx_callback rx_callback = {
 236                                        qtnf_pcie_control_rx_callback, priv };
 237
 238        ipc_tx_reg = &priv->bda->bda_shm_reg1;
 239        ipc_rx_reg = &priv->bda->bda_shm_reg2;
 240
 241        qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND,
 242                          ipc_tx_reg, priv->workqueue,
 243                          &ipc_int, &rx_callback);
 244        qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND,
 245                          ipc_rx_reg, priv->workqueue,
 246                          &ipc_int, &rx_callback);
 247
 248        return 0;
 249}
 250
 251static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
 252{
 253        qtnf_shm_ipc_free(&priv->shm_ipc_ep_in);
 254        qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
 255}
 256
 257static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
 258{
 259        int ret = -ENOMEM;
 260
 261        priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
 262        if (IS_ERR(priv->sysctl_bar)) {
 263                pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
 264                return ret;
 265        }
 266
 267        priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
 268        if (IS_ERR(priv->dmareg_bar)) {
 269                pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
 270                return ret;
 271        }
 272
 273        priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
 274        if (IS_ERR(priv->epmem_bar)) {
 275                pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
 276                return ret;
 277        }
 278
 279        priv->pcie_reg_base = priv->dmareg_bar;
 280        priv->bda = priv->epmem_bar;
 281        writel(priv->msi_enabled, &priv->bda->bda_rc_msi_enabled);
 282
 283        return 0;
 284}
 285
 286static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
 287{
 288        struct pci_dev *pdev = priv->pdev;
 289        struct pci_dev *parent;
 290        int mps_p, mps_o, mps_m, mps;
 291        int ret;
 292
 293        /* current mps */
 294        mps_o = pcie_get_mps(pdev);
 295
 296        /* maximum supported mps */
 297        mps_m = 128 << pdev->pcie_mpss;
 298
 299        /* suggested new mps value */
 300        mps = mps_m;
 301
 302        if (pdev->bus && pdev->bus->self) {
 303                /* parent (bus) mps */
 304                parent = pdev->bus->self;
 305
 306                if (pci_is_pcie(parent)) {
 307                        mps_p = pcie_get_mps(parent);
 308                        mps = min(mps_m, mps_p);
 309                }
 310        }
 311
 312        ret = pcie_set_mps(pdev, mps);
 313        if (ret) {
 314                pr_err("failed to set mps to %d, keep using current %d\n",
 315                       mps, mps_o);
 316                priv->mps = mps_o;
 317                return;
 318        }
 319
 320        pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
 321        priv->mps = mps;
 322}
 323
 324static int qtnf_is_state(__le32 __iomem *reg, u32 state)
 325{
 326        u32 s = readl(reg);
 327
 328        return s & state;
 329}
 330
 331static void qtnf_set_state(__le32 __iomem *reg, u32 state)
 332{
 333        u32 s = readl(reg);
 334
 335        qtnf_non_posted_write(state | s, reg);
 336}
 337
 338static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
 339{
 340        u32 s = readl(reg);
 341
 342        qtnf_non_posted_write(s & ~state, reg);
 343}
 344
 345static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
 346{
 347        u32 timeout = 0;
 348
 349        while ((qtnf_is_state(reg, state) == 0)) {
 350                usleep_range(1000, 1200);
 351                if (++timeout > delay_in_ms)
 352                        return -1;
 353        }
 354
 355        return 0;
 356}
 357
 358static int alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
 359{
 360        struct sk_buff **vaddr;
 361        int len;
 362
 363        len = priv->tx_bd_num * sizeof(*priv->tx_skb) +
 364                priv->rx_bd_num * sizeof(*priv->rx_skb);
 365        vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL);
 366
 367        if (!vaddr)
 368                return -ENOMEM;
 369
 370        priv->tx_skb = vaddr;
 371
 372        vaddr += priv->tx_bd_num;
 373        priv->rx_skb = vaddr;
 374
 375        return 0;
 376}
 377
/* Allocate one coherent DMA area holding the tx descriptor ring followed
 * immediately by the rx descriptor ring, reset the host-side tx indices,
 * and program the rx ring base address and geometry into the EP's HDP
 * block ("TX_HOST_Q" from the endpoint's point of view).
 *
 * Return: 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv)
{
        dma_addr_t paddr;
        void *vaddr;
        int len;

        len = priv->tx_bd_num * sizeof(struct qtnf_tx_bd) +
                priv->rx_bd_num * sizeof(struct qtnf_rx_bd);

        vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        /* tx bd */

        memset(vaddr, 0, len);

        priv->bd_table_vaddr = vaddr;
        priv->bd_table_paddr = paddr;
        priv->bd_table_len = len;

        priv->tx_bd_vbase = vaddr;
        priv->tx_bd_pbase = paddr;

        pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

        priv->tx_bd_r_index = 0;
        priv->tx_bd_w_index = 0;

        /* rx bd */

        /* rx ring starts right after the tx ring in the same allocation */
        vaddr = ((struct qtnf_tx_bd *)vaddr) + priv->tx_bd_num;
        paddr += priv->tx_bd_num * sizeof(struct qtnf_tx_bd);

        priv->rx_bd_vbase = vaddr;
        priv->rx_bd_pbase = paddr;

        /* hand the rx ring base to the EP; high half only on 64-bit DMA */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        writel(QTN_HOST_HI32(paddr),
               PCIE_HDP_TX_HOST_Q_BASE_H(priv->pcie_reg_base));
#endif
        writel(QTN_HOST_LO32(paddr),
               PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base));
        /* ring geometry: entry count in the low half, entry size above */
        writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16,
               PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base));

        pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

        return 0;
}
 428
 429static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 index)
 430{
 431        struct qtnf_rx_bd *rxbd;
 432        struct sk_buff *skb;
 433        dma_addr_t paddr;
 434
 435        skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
 436        if (!skb) {
 437                priv->rx_skb[index] = NULL;
 438                return -ENOMEM;
 439        }
 440
 441        priv->rx_skb[index] = skb;
 442        rxbd = &priv->rx_bd_vbase[index];
 443
 444        paddr = pci_map_single(priv->pdev, skb->data,
 445                               SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
 446        if (pci_dma_mapping_error(priv->pdev, paddr)) {
 447                pr_err("skb DMA mapping error: %pad\n", &paddr);
 448                return -ENOMEM;
 449        }
 450
 451        /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
 452        rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
 453        rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
 454        rxbd->info = 0x0;
 455
 456        priv->rx_bd_w_index = index;
 457
 458        /* sync up all descriptor updates */
 459        wmb();
 460
 461#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 462        writel(QTN_HOST_HI32(paddr),
 463               PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base));
 464#endif
 465        writel(QTN_HOST_LO32(paddr),
 466               PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base));
 467
 468        writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));
 469        return 0;
 470}
 471
 472static int alloc_rx_buffers(struct qtnf_pcie_bus_priv *priv)
 473{
 474        u16 i;
 475        int ret = 0;
 476
 477        memset(priv->rx_bd_vbase, 0x0,
 478               priv->rx_bd_num * sizeof(struct qtnf_rx_bd));
 479
 480        for (i = 0; i < priv->rx_bd_num; i++) {
 481                ret = skb2rbd_attach(priv, i);
 482                if (ret)
 483                        break;
 484        }
 485
 486        return ret;
 487}
 488
/* all rx/tx activity should have ceased before calling this function */
/* Unmap and free every outstanding rx and tx skb. Rx DMA addresses are
 * recovered from the rx descriptors (stored there by skb2rbd_attach());
 * tx DMA addresses from the tx descriptors written at transmit time.
 */
static void qtnf_free_xfer_buffers(struct qtnf_pcie_bus_priv *priv)
{
        struct qtnf_tx_bd *txbd;
        struct qtnf_rx_bd *rxbd;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int i;

        /* free rx buffers */
        for (i = 0; i < priv->rx_bd_num; i++) {
                if (priv->rx_skb && priv->rx_skb[i]) {
                        rxbd = &priv->rx_bd_vbase[i];
                        skb = priv->rx_skb[i];
                        paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
                                              le32_to_cpu(rxbd->addr));
                        pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        priv->rx_skb[i] = NULL;
                }
        }

        /* free tx buffers */
        for (i = 0; i < priv->tx_bd_num; i++) {
                if (priv->tx_skb && priv->tx_skb[i]) {
                        txbd = &priv->tx_bd_vbase[i];
                        skb = priv->tx_skb[i];
                        paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
                                              le32_to_cpu(txbd->addr));
                        pci_unmap_single(priv->pdev, paddr, skb->len,
                                         PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(skb);
                        priv->tx_skb[i] = NULL;
                }
        }
}
 526
/* Soft-reset the HHBM (host buffer manager) block, select 64-bit buffer
 * pointers when the platform uses 64-bit DMA addresses, and program the
 * rx queue depth limit. Always returns 0.
 */
static int qtnf_hhbm_init(struct qtnf_pcie_bus_priv *priv)
{
        u32 val;

        /* pulse the soft-reset bit; the short sleep lets the reset settle */
        val = readl(PCIE_HHBM_CONFIG(priv->pcie_reg_base));
        val |= HHBM_CONFIG_SOFT_RESET;
        writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
        usleep_range(50, 100);
        val &= ~HHBM_CONFIG_SOFT_RESET;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        val |= HHBM_64BIT;
#endif
        writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
        /* cap the number of buffers HHBM may consume to the rx ring size */
        writel(priv->rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(priv->pcie_reg_base));

        return 0;
}
 544
 545static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv)
 546{
 547        int ret;
 548        u32 val;
 549
 550        priv->tx_bd_num = tx_bd_size_param;
 551        priv->rx_bd_num = rx_bd_size_param;
 552        priv->rx_bd_w_index = 0;
 553        priv->rx_bd_r_index = 0;
 554
 555        if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
 556                pr_err("tx_bd_size_param %u is not power of two\n",
 557                       priv->tx_bd_num);
 558                return -EINVAL;
 559        }
 560
 561        val = priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
 562        if (val > PCIE_HHBM_MAX_SIZE) {
 563                pr_err("tx_bd_size_param %u is too large\n",
 564                       priv->tx_bd_num);
 565                return -EINVAL;
 566        }
 567
 568        if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
 569                pr_err("rx_bd_size_param %u is not power of two\n",
 570                       priv->rx_bd_num);
 571                return -EINVAL;
 572        }
 573
 574        val = priv->rx_bd_num * sizeof(dma_addr_t);
 575        if (val > PCIE_HHBM_MAX_SIZE) {
 576                pr_err("rx_bd_size_param %u is too large\n",
 577                       priv->rx_bd_num);
 578                return -EINVAL;
 579        }
 580
 581        ret = qtnf_hhbm_init(priv);
 582        if (ret) {
 583                pr_err("failed to init h/w queues\n");
 584                return ret;
 585        }
 586
 587        ret = alloc_skb_array(priv);
 588        if (ret) {
 589                pr_err("failed to allocate skb array\n");
 590                return ret;
 591        }
 592
 593        ret = alloc_bd_table(priv);
 594        if (ret) {
 595                pr_err("failed to allocate bd table\n");
 596                return ret;
 597        }
 598
 599        ret = alloc_rx_buffers(priv);
 600        if (ret) {
 601                pr_err("failed to allocate rx buffers\n");
 602                return ret;
 603        }
 604
 605        return ret;
 606}
 607
/* Reclaim tx descriptors the EP has finished with: read the hardware
 * completion counter, then unmap, account and free every skb between the
 * current read index and that point, waking stopped netdev queues on the
 * way. Safe against concurrent callers via tx_reclaim_lock.
 */
static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
{
        struct qtnf_tx_bd *txbd;
        struct sk_buff *skb;
        unsigned long flags;
        dma_addr_t paddr;
        u32 tx_done_index;
        int count = 0;
        int i;

        spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

        /* HW counter modulo ring size = index of the next unfinished bd */
        tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
                        & (priv->tx_bd_num - 1);

        i = priv->tx_bd_r_index;

        while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
                skb = priv->tx_skb[i];
                if (likely(skb)) {
                        txbd = &priv->tx_bd_vbase[i];
                        paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
                                              le32_to_cpu(txbd->addr));
                        pci_unmap_single(priv->pdev, paddr, skb->len,
                                         PCI_DMA_TODEVICE);

                        if (skb->dev) {
                                qtnf_update_tx_stats(skb->dev, skb);
                                /* queues were stopped on ring-full: restart */
                                if (unlikely(priv->tx_stopped)) {
                                        qtnf_wake_all_queues(skb->dev);
                                        priv->tx_stopped = 0;
                                }
                        }

                        dev_kfree_skb_any(skb);
                }

                priv->tx_skb[i] = NULL;
                count++;

                if (++i >= priv->tx_bd_num)
                        i = 0;
        }

        /* bookkeeping for the debugfs counters */
        priv->tx_reclaim_done += count;
        priv->tx_reclaim_req++;
        priv->tx_bd_r_index = i;

        spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}
 658
 659static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
 660{
 661        if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
 662                        priv->tx_bd_num)) {
 663                qtnf_pcie_data_tx_reclaim(priv);
 664
 665                if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
 666                                priv->tx_bd_num)) {
 667                        pr_warn_ratelimited("reclaim full Tx queue\n");
 668                        priv->tx_full_count++;
 669                        return 0;
 670                }
 671        }
 672
 673        return 1;
 674}
 675
 676static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
 677{
 678        struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
 679        dma_addr_t txbd_paddr, skb_paddr;
 680        struct qtnf_tx_bd *txbd;
 681        unsigned long flags;
 682        int len, i;
 683        u32 info;
 684        int ret = 0;
 685
 686        spin_lock_irqsave(&priv->tx0_lock, flags);
 687
 688        if (!qtnf_tx_queue_ready(priv)) {
 689                if (skb->dev) {
 690                        netif_tx_stop_all_queues(skb->dev);
 691                        priv->tx_stopped = 1;
 692                }
 693
 694                spin_unlock_irqrestore(&priv->tx0_lock, flags);
 695                return NETDEV_TX_BUSY;
 696        }
 697
 698        i = priv->tx_bd_w_index;
 699        priv->tx_skb[i] = skb;
 700        len = skb->len;
 701
 702        skb_paddr = pci_map_single(priv->pdev, skb->data,
 703                                   skb->len, PCI_DMA_TODEVICE);
 704        if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
 705                pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
 706                ret = -ENOMEM;
 707                goto tx_done;
 708        }
 709
 710        txbd = &priv->tx_bd_vbase[i];
 711        txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
 712        txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));
 713
 714        info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
 715        txbd->info = cpu_to_le32(info);
 716
 717        /* sync up all descriptor updates before passing them to EP */
 718        dma_wmb();
 719
 720        /* write new TX descriptor to PCIE_RX_FIFO on EP */
 721        txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd);
 722
 723#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 724        writel(QTN_HOST_HI32(txbd_paddr),
 725               PCIE_HDP_HOST_WR_DESC0_H(priv->pcie_reg_base));
 726#endif
 727        writel(QTN_HOST_LO32(txbd_paddr),
 728               PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base));
 729
 730        if (++i >= priv->tx_bd_num)
 731                i = 0;
 732
 733        priv->tx_bd_w_index = i;
 734
 735tx_done:
 736        if (ret && skb) {
 737                pr_err_ratelimited("drop skb\n");
 738                if (skb->dev)
 739                        skb->dev->stats.tx_dropped++;
 740                dev_kfree_skb_any(skb);
 741        }
 742
 743        priv->tx_done_count++;
 744        spin_unlock_irqrestore(&priv->tx0_lock, flags);
 745
 746        qtnf_pcie_data_tx_reclaim(priv);
 747
 748        return NETDEV_TX_OK;
 749}
 750
 751static int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
 752{
 753        struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
 754        int ret;
 755
 756        ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len);
 757
 758        if (ret == -ETIMEDOUT) {
 759                pr_err("EP firmware is dead\n");
 760                bus->fw_state = QTNF_FW_STATE_EP_DEAD;
 761        }
 762
 763        return ret;
 764}
 765
/* Top-half interrupt handler shared by MSI and legacy INTx modes: always
 * kicks the SHM-IPC handlers, then dispatches masked-in HDP events to
 * NAPI (rx) and the reclaim tasklet (tx), disabling the corresponding
 * irq source until the bottom half re-enables it.
 */
static irqreturn_t qtnf_interrupt(int irq, void *data)
{
        struct qtnf_bus *bus = (struct qtnf_bus *)data;
        struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
        u32 status;

        priv->pcie_irq_count++;
        status = readl(PCIE_HDP_INT_STATUS(priv->pcie_reg_base));

        /* IPC events carry no HDP status bit: always poll both endpoints */
        qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
        qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

        if (!(status & priv->pcie_irq_mask))
                goto irq_done;

        if (status & PCIE_HDP_INT_RX_BITS)
                priv->pcie_irq_rx_count++;

        if (status & PCIE_HDP_INT_TX_BITS)
                priv->pcie_irq_tx_count++;

        if (status & PCIE_HDP_INT_HHBM_UF)
                priv->pcie_irq_uf_count++;

        /* rx work continues in NAPI context; mask rx irqs until then */
        if (status & PCIE_HDP_INT_RX_BITS) {
                qtnf_dis_rxdone_irq(priv);
                napi_schedule(&bus->mux_napi);
        }

        /* tx completions are reclaimed from tasklet context */
        if (status & PCIE_HDP_INT_TX_BITS) {
                qtnf_dis_txdone_irq(priv);
                tasklet_hi_schedule(&priv->reclaim_tq);
        }

irq_done:
        /* H/W workaround: clean all bits, not only enabled */
        qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(priv->pcie_reg_base));

        if (!priv->msi_enabled)
                qtnf_deassert_intx(priv);

        return IRQ_HANDLED;
}
 809
 810static int qtnf_rx_data_ready(struct qtnf_pcie_bus_priv *priv)
 811{
 812        u16 index = priv->rx_bd_r_index;
 813        struct qtnf_rx_bd *rxbd;
 814        u32 descw;
 815
 816        rxbd = &priv->rx_bd_vbase[index];
 817        descw = le32_to_cpu(rxbd->info);
 818
 819        if (descw & QTN_TXDONE_MASK)
 820                return 1;
 821
 822        return 0;
 823}
 824
/* NAPI poll method: consume up to @budget completed rx descriptors,
 * passing good packets up via GRO, dropping descriptors with missing
 * skbs or bad lengths, and refilling freed ring slots with new mapped
 * buffers. Re-enables rx-done interrupts when the ring is drained.
 */
static int qtnf_rx_poll(struct napi_struct *napi, int budget)
{
        struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
        struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
        struct net_device *ndev = NULL;
        struct sk_buff *skb = NULL;
        int processed = 0;
        struct qtnf_rx_bd *rxbd;
        dma_addr_t skb_paddr;
        int consume;
        u32 descw;
        u32 psize;
        u16 r_idx;
        u16 w_idx;
        int ret;

        while (processed < budget) {


                if (!qtnf_rx_data_ready(priv))
                        goto rx_out;

                r_idx = priv->rx_bd_r_index;
                rxbd = &priv->rx_bd_vbase[r_idx];
                descw = le32_to_cpu(rxbd->info);

                skb = priv->rx_skb[r_idx];
                psize = QTN_GET_LEN(descw);
                consume = 1;

                if (!(descw & QTN_TXDONE_MASK)) {
                        pr_warn("skip invalid rxbd[%d]\n", r_idx);
                        consume = 0;
                }

                if (!skb) {
                        pr_warn("skip missing rx_skb[%d]\n", r_idx);
                        consume = 0;
                }

                if (skb && (skb_tailroom(skb) <  psize)) {
                        pr_err("skip packet with invalid length: %u > %u\n",
                               psize, skb_tailroom(skb));
                        consume = 0;
                }

                /* unmap even when dropping, so the buffer can be freed */
                if (skb) {
                        skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
                                                  le32_to_cpu(rxbd->addr));
                        pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
                                         PCI_DMA_FROMDEVICE);
                }

                if (consume) {
                        skb_put(skb, psize);
                        ndev = qtnf_classify_skb(bus, skb);
                        if (likely(ndev)) {
                                qtnf_update_rx_stats(ndev, skb);
                                skb->protocol = eth_type_trans(skb, ndev);
                                napi_gro_receive(napi, skb);
                        } else {
                                pr_debug("drop untagged skb\n");
                                bus->mux_dev.stats.rx_dropped++;
                                dev_kfree_skb_any(skb);
                        }
                } else {
                        if (skb) {
                                bus->mux_dev.stats.rx_dropped++;
                                dev_kfree_skb_any(skb);
                        }
                }

                priv->rx_skb[r_idx] = NULL;
                if (++r_idx >= priv->rx_bd_num)
                        r_idx = 0;

                priv->rx_bd_r_index = r_idx;

                /* replace processed buffers by new ones */
                w_idx = priv->rx_bd_w_index;
                while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
                                  priv->rx_bd_num) > 0) {
                        if (++w_idx >= priv->rx_bd_num)
                                w_idx = 0;

                        ret = skb2rbd_attach(priv, w_idx);
                        if (ret) {
                                pr_err("failed to allocate new rx_skb[%d]\n",
                                       w_idx);
                                break;
                        }
                }

                processed++;
        }

rx_out:
        /* budget not exhausted: ring drained, hand irq control back to HW */
        if (processed < budget) {
                napi_complete(napi);
                qtnf_en_rxdone_irq(priv);
        }

        return processed;
}
 929
 930static void
 931qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
 932{
 933        struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
 934
 935        tasklet_hi_schedule(&priv->reclaim_tq);
 936}
 937
 938static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
 939{
 940        struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
 941
 942        qtnf_enable_hdp_irqs(priv);
 943        napi_enable(&bus->mux_napi);
 944}
 945
 946static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
 947{
 948        struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
 949
 950        napi_disable(&bus->mux_napi);
 951        qtnf_disable_hdp_irqs(priv);
 952}
 953
/* Bus operations exposed to the qtnfmac core layer. */
static const struct qtnf_bus_ops qtnf_pcie_bus_ops = {
	/* control path methods */
	.control_tx	= qtnf_pcie_control_tx,

	/* data path methods */
	.data_tx		= qtnf_pcie_data_tx,
	.data_tx_timeout	= qtnf_pcie_data_tx_timeout,
	.data_rx_start		= qtnf_pcie_data_rx_start,
	.data_rx_stop		= qtnf_pcie_data_rx_stop,
};
 964
 965static int qtnf_dbg_mps_show(struct seq_file *s, void *data)
 966{
 967        struct qtnf_bus *bus = dev_get_drvdata(s->private);
 968        struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
 969
 970        seq_printf(s, "%d\n", priv->mps);
 971
 972        return 0;
 973}
 974
 975static int qtnf_dbg_msi_show(struct seq_file *s, void *data)
 976{
 977        struct qtnf_bus *bus = dev_get_drvdata(s->private);
 978        struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
 979
 980        seq_printf(s, "%u\n", priv->msi_enabled);
 981
 982        return 0;
 983}
 984
 985static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
 986{
 987        struct qtnf_bus *bus = dev_get_drvdata(s->private);
 988        struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
 989        u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base));
 990        u32 status;
 991
 992        seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count);
 993        seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count);
 994        status = reg &  PCIE_HDP_INT_TX_BITS;
 995        seq_printf(s, "pcie_irq_tx_status(%s)\n",
 996                   (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
 997        seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count);
 998        status = reg &  PCIE_HDP_INT_RX_BITS;
 999        seq_printf(s, "pcie_irq_rx_status(%s)\n",
1000                   (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
1001        seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count);
1002        status = reg &  PCIE_HDP_INT_HHBM_UF;
1003        seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
1004                   (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");
1005
1006        return 0;
1007}
1008
1009static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
1010{
1011        struct qtnf_bus *bus = dev_get_drvdata(s->private);
1012        struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
1013
1014        seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
1015        seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
1016        seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
1017        seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
1018
1019        seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
1020        seq_printf(s, "tx_bd_p_index(%u)\n",
1021                   readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
1022                        & (priv->tx_bd_num - 1));
1023        seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
1024        seq_printf(s, "tx queue len(%u)\n",
1025                   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
1026                            priv->tx_bd_num));
1027
1028        seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
1029        seq_printf(s, "rx_bd_p_index(%u)\n",
1030                   readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base))
1031                        & (priv->rx_bd_num - 1));
1032        seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
1033        seq_printf(s, "rx alloc queue len(%u)\n",
1034                   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
1035                              priv->rx_bd_num));
1036
1037        return 0;
1038}
1039
1040static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
1041{
1042        struct qtnf_bus *bus = dev_get_drvdata(s->private);
1043        struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
1044
1045        seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n",
1046                   priv->shm_ipc_ep_in.tx_packet_count);
1047        seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n",
1048                   priv->shm_ipc_ep_in.rx_packet_count);
1049        seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n",
1050                   priv->shm_ipc_ep_out.tx_timeout_count);
1051        seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n",
1052                   priv->shm_ipc_ep_out.rx_packet_count);
1053
1054        return 0;
1055}
1056
/* Build and transmit one firmware-download frame over the data path.
 *
 * @priv: PCIe bus private data
 * @size: total firmware image size in bytes
 * @blk:  zero-based sequence number of the block being sent
 * @pblk: current read position within the image
 * @fw:   start of the firmware image
 *
 * A frame is a struct qtnf_pcie_fw_hdr followed by up to
 * QTN_PCIE_FW_BUFSZ - sizeof(hdr) bytes of image data. Block 0 is
 * tagged QTN_FW_DBEGIN, the final block QTN_FW_DEND, all others
 * QTN_FW_DSUB.
 *
 * Returns the payload length handed to qtnf_pcie_data_tx() on success,
 * 0 if the frame was not accepted (caller retries the same block), or
 * -ENOMEM if the skb allocation failed.
 */
static int qtnf_ep_fw_send(struct qtnf_pcie_bus_priv *priv, uint32_t size,
			   int blk, const u8 *pblk, const u8 *fw)
{
	struct pci_dev *pdev = priv->pdev;
	struct qtnf_bus *bus = pci_get_drvdata(pdev);

	struct qtnf_pcie_fw_hdr *hdr;
	u8 *pdata;

	int hds = sizeof(*hdr);
	struct sk_buff *skb = NULL;
	int len = 0;
	int ret;

	skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* skb->len is set directly (no skb_put): the full buffer is
	 * always transmitted; the actual payload size is carried in
	 * hdr->pktlen below
	 */
	skb->len = QTN_PCIE_FW_BUFSZ;
	skb->dev = NULL;

	hdr = (struct qtnf_pcie_fw_hdr *)skb->data;
	memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
	hdr->fwsize = cpu_to_le32(size);
	hdr->seqnum = cpu_to_le32(blk);

	if (blk)
		hdr->type = cpu_to_le32(QTN_FW_DSUB);
	else
		hdr->type = cpu_to_le32(QTN_FW_DBEGIN);

	pdata = skb->data + hds;

	len = QTN_PCIE_FW_BUFSZ - hds;
	/* last (possibly short) block: clamp len and mark end of image */
	if (pblk >= (fw + size - len)) {
		len = fw + size - pblk;
		hdr->type = cpu_to_le32(QTN_FW_DEND);
	}

	hdr->pktlen = cpu_to_le32(len);
	memcpy(pdata, pblk, len);
	hdr->crc = cpu_to_le32(~crc32(0, pdata, len));

	ret = qtnf_pcie_data_tx(bus, skb);

	return (ret == NETDEV_TX_OK) ? len : 0;
}
1104
/* Upload the firmware image to the EP in blk_size chunks.
 *
 * After every window of QTN_PCIE_FW_DLMASK + 1 blocks (and after the
 * final block) a SYNC handshake is performed through the boot data
 * area. If the EP flags QTN_EP_FW_RETRY, blk/pblk are rewound to the
 * start of the window just sent and it is retransmitted. A total retry
 * budget of 10000 loop iterations guards against a wedged EP.
 *
 * Returns 0 on success or -ETIMEDOUT.
 */
static int
qtnf_ep_fw_load(struct qtnf_pcie_bus_priv *priv, const u8 *fw, u32 fw_size)
{
	int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pcie_fw_hdr);
	int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
	const u8 *pblk = fw;
	int threshold = 0;
	int blk = 0;
	int len;

	pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);

	while (blk < blk_count) {
		/* global budget covering both send and sync retries */
		if (++threshold > 10000) {
			pr_err("FW upload failed: too many retries\n");
			return -ETIMEDOUT;
		}

		len = qtnf_ep_fw_send(priv, fw_size, blk, pblk, fw);
		if (len <= 0)
			continue;

		/* sync with the EP at each window boundary and at the
		 * final block
		 */
		if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
		    (blk == (blk_count - 1))) {
			qtnf_set_state(&priv->bda->bda_rc_state,
				       QTN_RC_FW_SYNC);
			if (qtnf_poll_state(&priv->bda->bda_ep_state,
					    QTN_EP_FW_SYNC,
					    QTN_FW_DL_TIMEOUT_MS)) {
				pr_err("FW upload failed: SYNC timed out\n");
				return -ETIMEDOUT;
			}

			qtnf_clear_state(&priv->bda->bda_ep_state,
					 QTN_EP_FW_SYNC);

			if (qtnf_is_state(&priv->bda->bda_ep_state,
					  QTN_EP_FW_RETRY)) {
				/* EP rejected the window: rewind blk and
				 * pblk to its first block and resend
				 */
				if (blk == (blk_count - 1)) {
					int last_round =
						blk_count & QTN_PCIE_FW_DLMASK;
					blk -= last_round;
					pblk -= ((last_round - 1) *
						blk_size + len);
				} else {
					blk -= QTN_PCIE_FW_DLMASK;
					pblk -= QTN_PCIE_FW_DLMASK * blk_size;
				}

				qtnf_clear_state(&priv->bda->bda_ep_state,
						 QTN_EP_FW_RETRY);

				pr_warn("FW upload retry: block #%d\n", blk);
				continue;
			}

			/* free Tx buffers consumed by the window just sent */
			qtnf_pcie_data_tx_reclaim(priv);
		}

		pblk += len;
		blk++;
	}

	pr_debug("FW upload completed: totally sent %d blocks\n", blk);
	return 0;
}
1171
/* Deferred firmware bringup, scheduled by qtnf_bringup_fw_async().
 *
 * Either instructs the EP to boot from flash (flashboot != 0) or
 * uploads the firmware binary, then waits for the EP to reach FW_DONE
 * and QLINK_DONE states, attaches the qtnfmac core and registers the
 * debugfs entries. On any failure the bus is left in
 * QTNF_FW_STATE_DETACHED. On all paths firmware_init_complete is
 * completed and the device reference taken at scheduling time is
 * dropped.
 */
static void qtnf_fw_work_handler(struct work_struct *work)
{
	struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
	struct pci_dev *pdev = priv->pdev;
	const struct firmware *fw;
	int ret;
	u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;

	if (flashboot) {
		state |= QTN_RC_FW_FLASHBOOT;
	} else {
		ret = request_firmware(&fw, bus->fwname, &pdev->dev);
		if (ret < 0) {
			pr_err("failed to get firmware %s\n", bus->fwname);
			goto fw_load_fail;
		}
	}

	/* announce RC readiness (and boot source) to the EP */
	qtnf_set_state(&priv->bda->bda_rc_state, state);

	if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready\n");

		/* fw was only requested on the non-flashboot path */
		if (!flashboot)
			release_firmware(fw);

		goto fw_load_fail;
	}

	qtnf_clear_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY);

	if (flashboot) {
		pr_info("booting firmware from flash\n");
	} else {
		pr_info("starting firmware upload: %s\n", bus->fwname);

		ret = qtnf_ep_fw_load(priv, fw->data, fw->size);
		release_firmware(fw);
		if (ret) {
			pr_err("firmware upload error\n");
			goto fw_load_fail;
		}
	}

	if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("firmware bringup timed out\n");
		goto fw_load_fail;
	}

	bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
	pr_info("firmware is up and running\n");

	/* wait for the QLINK protocol layer on the EP side */
	if (qtnf_poll_state(&priv->bda->bda_ep_state,
			    QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
		pr_err("firmware runtime failure\n");
		goto fw_load_fail;
	}

	ret = qtnf_core_attach(bus);
	if (ret) {
		pr_err("failed to attach core\n");
		goto fw_load_fail;
	}

	qtnf_debugfs_init(bus, DRV_NAME);
	qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
	qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
	qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
	qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
	qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);

	goto fw_load_exit;

fw_load_fail:
	bus->fw_state = QTNF_FW_STATE_DETACHED;

fw_load_exit:
	complete(&bus->firmware_init_complete);
	put_device(&pdev->dev);
}
1255
1256static void qtnf_bringup_fw_async(struct qtnf_bus *bus)
1257{
1258        struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
1259        struct pci_dev *pdev = priv->pdev;
1260
1261        get_device(&pdev->dev);
1262        INIT_WORK(&bus->fw_work, qtnf_fw_work_handler);
1263        schedule_work(&bus->fw_work);
1264}
1265
/* Tasklet body: reclaim completed Tx buffers, then re-enable the
 * tx-done interrupt that was masked when the tasklet was scheduled.
 */
static void qtnf_reclaim_tasklet_fn(unsigned long data)
{
	struct qtnf_pcie_bus_priv *ps = (struct qtnf_pcie_bus_priv *)data;

	qtnf_pcie_data_tx_reclaim(ps);
	qtnf_en_txdone_irq(ps);
}
1273
/* PCI probe: allocate bus/private structures, initialize PCI, DMA,
 * shared-memory IPC, descriptor rings and interrupts, then kick off
 * asynchronous firmware bringup.
 *
 * Returns 0 on success or a negative errno; devm_/pcim_-managed
 * resources are released automatically, everything else is unwound
 * through the goto chain below.
 */
static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qtnf_pcie_bus_priv *pcie_priv;
	struct qtnf_bus *bus;
	int ret;

	/* bus and its bus-private area share one managed allocation */
	bus = devm_kzalloc(&pdev->dev,
			   sizeof(*bus) + sizeof(*pcie_priv), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	pcie_priv = get_bus_priv(bus);

	pci_set_drvdata(pdev, bus);
	bus->bus_ops = &qtnf_pcie_bus_ops;
	bus->dev = &pdev->dev;
	bus->fw_state = QTNF_FW_STATE_RESET;
	pcie_priv->pdev = pdev;

	strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
	init_completion(&bus->firmware_init_complete);
	mutex_init(&bus->bus_lock);
	spin_lock_init(&pcie_priv->tx0_lock);
	spin_lock_init(&pcie_priv->irq_lock);
	spin_lock_init(&pcie_priv->tx_reclaim_lock);

	/* init stats */
	pcie_priv->tx_full_count = 0;
	pcie_priv->tx_done_count = 0;
	pcie_priv->pcie_irq_count = 0;
	pcie_priv->pcie_irq_rx_count = 0;
	pcie_priv->pcie_irq_tx_count = 0;
	pcie_priv->pcie_irq_uf_count = 0;
	pcie_priv->tx_reclaim_done = 0;
	pcie_priv->tx_reclaim_req = 0;

	tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn,
		     (unsigned long)pcie_priv);

	/* dummy netdev anchors the NAPI context for the Rx path */
	init_dummy_netdev(&bus->mux_dev);
	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
		       qtnf_rx_poll, 10);

	pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PEARL_PCIE");
	if (!pcie_priv->workqueue) {
		pr_err("failed to alloc bus workqueue\n");
		ret = -ENODEV;
		goto err_init;
	}

	if (!pci_is_pcie(pdev)) {
		pr_err("device %s is not PCI Express\n", pci_name(pdev));
		ret = -EIO;
		goto err_base;
	}

	qtnf_tune_pcie_mps(pcie_priv);

	ret = pcim_enable_device(pdev);
	if (ret) {
		pr_err("failed to init PCI device %x\n", pdev->device);
		goto err_base;
	} else {
		pr_debug("successful init of PCI device %x\n", pdev->device);
	}

	/* 64-bit DMA mask only when dma_addr_t can actually hold it */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
#else
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
#endif
	if (ret) {
		pr_err("PCIE DMA coherent mask init failed\n");
		goto err_base;
	}

	pci_set_master(pdev);
	qtnf_pcie_init_irq(pcie_priv);

	ret = qtnf_pcie_init_memory(pcie_priv);
	if (ret < 0) {
		pr_err("PCIE memory init failed\n");
		goto err_base;
	}

	pci_save_state(pdev);

	ret = qtnf_pcie_init_shm_ipc(pcie_priv);
	if (ret < 0) {
		pr_err("PCIE SHM IPC init failed\n");
		goto err_base;
	}

	ret = qtnf_pcie_init_xfer(pcie_priv);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		goto err_ipc;
	}

	/* init default irq settings */
	qtnf_init_hdp_irqs(pcie_priv);

	/* start with disabled irqs */
	qtnf_disable_hdp_irqs(pcie_priv);

	ret = devm_request_irq(&pdev->dev, pdev->irq, &qtnf_interrupt, 0,
			       "qtnf_pcie_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		goto err_xfer;
	}

	qtnf_bringup_fw_async(bus);

	return 0;

err_xfer:
	qtnf_free_xfer_buffers(pcie_priv);

err_ipc:
	qtnf_pcie_free_shm_ipc(pcie_priv);

err_base:
	flush_workqueue(pcie_priv->workqueue);
	destroy_workqueue(pcie_priv->workqueue);
	netif_napi_del(&bus->mux_napi);

err_init:
	tasklet_kill(&pcie_priv->reclaim_tq);
	pci_set_drvdata(pdev, NULL);

	return ret;
}
1407
/* PCI remove: wait for any in-flight firmware bringup to finish,
 * detach the qtnfmac core if it attached, then tear down the data
 * path, debugfs, SHM IPC and finally reset the card.
 */
static void qtnf_pcie_remove(struct pci_dev *pdev)
{
	struct qtnf_pcie_bus_priv *priv;
	struct qtnf_bus *bus;

	bus = pci_get_drvdata(pdev);
	if (!bus)
		return;

	/* fw work completes this on every path (see fw_load_exit) */
	wait_for_completion(&bus->firmware_init_complete);

	if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
	    bus->fw_state == QTNF_FW_STATE_EP_DEAD)
		qtnf_core_detach(bus);

	priv = get_bus_priv(bus);

	netif_napi_del(&bus->mux_napi);
	flush_workqueue(priv->workqueue);
	destroy_workqueue(priv->workqueue);
	tasklet_kill(&priv->reclaim_tq);

	qtnf_free_xfer_buffers(priv);
	qtnf_debugfs_remove(bus);

	qtnf_pcie_free_shm_ipc(priv);
	qtnf_reset_card(priv);
}
1436
#ifdef CONFIG_PM_SLEEP
/* System sleep is not supported: reject suspend so the card stays up. */
static int qtnf_pcie_suspend(struct device *dev)
{
	return -EOPNOTSUPP;
}

/* Nothing to restore since suspend is always rejected. */
static int qtnf_pcie_resume(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */
1448
#ifdef CONFIG_PM_SLEEP
/* Power Management Hooks */
static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend,
			 qtnf_pcie_resume);
#endif
1454
/* PCI IDs of supported Quantenna Pearl (QSR10g) devices. */
static const struct pci_device_id qtnf_pcie_devid_table[] = {
	{
		PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	},
	{ },
};

MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
1464
/* PCI driver descriptor; PM ops only when sleep support is built in. */
static struct pci_driver qtnf_pcie_drv_data = {
	.name = DRV_NAME,
	.id_table = qtnf_pcie_devid_table,
	.probe = qtnf_pcie_probe,
	.remove = qtnf_pcie_remove,
#ifdef CONFIG_PM_SLEEP
	.driver = {
		.pm = &qtnf_pcie_pm_ops,
	},
#endif
};
1476
1477static int __init qtnf_pcie_register(void)
1478{
1479        pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
1480        return pci_register_driver(&qtnf_pcie_drv_data);
1481}
1482
/* Module exit point: unregister the PCI driver. */
static void __exit qtnf_pcie_exit(void)
{
	pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
	pci_unregister_driver(&qtnf_pcie_drv_data);
}
1488
/* Module registration and metadata. */
module_init(qtnf_pcie_register);
module_exit(qtnf_pcie_exit);

MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
MODULE_LICENSE("GPL");
1495