linux/drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_1_0_DEVICE_ID   (0xabcd)
#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
                                             int num);
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

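/*
 * Host Copy Engine attributes. Field order per struct ce_attr in ce.h:
 * flags, priority (currently unused), src_nentries, src_sz_max,
 * dest_nentries, reserved.
 */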
static const struct ce_attr host_ce_config_wlan[] = {
        /* host->target HTC control and raw streams */
        { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
        /* could be moved to share CE3 */
        /* target->host HTT + HTC control */
        { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
        /* target->host WMI */
        { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
        /* host->target WMI */
        { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
        /* host->target HTT */
        { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
                    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
        /* unused */
        { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
        /* Target autonomous hif_memcpy */
        { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
        /* ce_diag, the Diagnostic Window */
        { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};

/* Target firmware's Copy Engine configuration. */
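/*
 * Field order per struct ce_pipe_config in ce.h: pipenum, pipedir,
 * nentries, nbytes_max, flags, reserved.
 */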
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* host->target HTC control and raw streams */
        { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
        /* target->host HTT + HTC control */
        { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
        /* target->host WMI */
        { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
        /* host->target WMI */
        { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
        /* host->target HTT */
        { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
        /* NB: 50% of src nentries, since tx has 2 frags */
        /* unused */
        { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
        /* Reserved for target autonomous hif_memcpy */
        { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
        /* CE7 used only by Host */
};

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * The caller must guarantee proper alignment, where applicable, and that
 * there is only a single user at any given moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ce_state *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read function, but preserve this function's multi-word
         * read capability.
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) &&  ((ret = ath10k_pci_diag_read_access(
                                           ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                 0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32) address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

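/*
 * Write nbytes of caller data to Target memory via the diagnostic CE,
 * bouncing it through a DMA-coherent buffer so the transfer is 4-byte
 * aligned and DMA-able.
 */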
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ce_state *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;
        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

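/* Poll for up to ~1 second (100 x 10 ms) for the target to wake up. */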
static void ath10k_pci_wait(struct ath10k *ar)
{
        int n = 100;

        while (n-- && !ath10k_pci_target_is_awake(ar))
                msleep(10);

        if (n < 0)
                ath10k_warn("Unable to wake up target\n");
}

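/*
 * Refcounted force-wake: the first caller asserts PCIE_SOC_WAKE; the
 * matching ath10k_do_pci_sleep() releases it once the count drops back
 * to zero. Polls with an increasing delay (5..50 us) up to
 * PCIE_WAKE_TIMEOUT for the target to report awake.
 */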
void ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        break;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target takes too long to wake up (awake count %d)\n",
                                    atomic_read(&ar_pci->keep_awake_count));
                        break;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
                                    void *transfer_context,
                                    u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        bool process = false;

        do {
                /*
                 * For the send completion of an item in sendlist, just
                 * increment num_sends_allowed. The upper layer callback will
                 * be triggered when the last fragment has been sent.
                 */
                if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
                        spin_lock_bh(&pipe_info->pipe_lock);
                        pipe_info->num_sends_allowed++;
                        spin_unlock_bh(&pipe_info->pipe_lock);
                        continue;
                }

                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->send_or_recv = HIF_CE_COMPLETE_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->transfer_context = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

                process = true;
        } while (ath10k_ce_completed_send_next(ce_state,
                                                           &transfer_context,
                                                           &ce_data, &nbytes,
                                                           &transfer_id) == 0);

        /*
         * If only some of the items within a sendlist have completed,
         * don't invoke completion processing until the entire sendlist
         * has been sent.
         */
        if (!process)
                return;

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
                                    void *transfer_context, u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id,
                                    unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;

        do {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->send_or_recv = HIF_CE_COMPLETE_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->transfer_context = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

        } while (ath10k_ce_completed_recv_next(ce_state,
                                                           &transfer_context,
                                                           &ce_data, &nbytes,
                                                           &transfer_id,
                                                           &flags) == 0);

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ce_state *ce_hdl = pipe_info->ce_hdl;
        struct ce_sendlist sendlist;
        unsigned int len;
        u32 flags = 0;
        int ret;

        memset(&sendlist, 0, sizeof(struct ce_sendlist));

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long) skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

        /* Make sure we have resources to handle this request */
        spin_lock_bh(&pipe_info->pipe_lock);
        if (!pipe_info->num_sends_allowed) {
                ath10k_warn("Pipe: %d is full\n", pipe_id);
                spin_unlock_bh(&pipe_info->pipe_lock);
                return -ENOSR;
        }
        pipe_info->num_sends_allowed--;
        spin_unlock_bh(&pipe_info->pipe_lock);

        ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
        if (ret)
                ath10k_warn("CE send failed: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
        int ret;

        spin_lock_bh(&pipe_info->pipe_lock);
        ret = pipe_info->num_sends_allowed;
        spin_unlock_bh(&pipe_info->pipe_lock);

        return ret;
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        if (ath10k_pci_diag_read_mem(ar, host_addr,
                                     &reg_dump_area, sizeof(u32)) != 0) {
                ath10k_warn("could not read hi_failure_state\n");
                return;
        }

        ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("could not dump FW Dump Area\n");
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target Register Dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

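/*
 * Register send/recv completion callbacks and pre-allocate completion
 * structures for every pipe except the diagnostic CE.
 */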
static int ath10k_pci_start_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ce_state *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
        struct hif_ce_pipe_info *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
                        pipe_info->num_sends_allowed = attr->src_nentries - 1;
                }

                if (attr->dest_nentries) {
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
                        completions += attr->dest_nentries;
                }

                if (completions == 0)
                        continue;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(struct ath10k_pci_compl),
                                        GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }

                        compl->send_or_recv = HIF_CE_COMPLETE_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        int i;

        ath10k_ce_disable_interrupts(ar);

        /* Cancel the pending tasklet */
        tasklet_kill(&ar_pci->intr_tq);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = (struct sk_buff *)compl->transfer_context;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct hif_ce_pipe_info *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = (struct sk_buff *)compl->transfer_context;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
                        cb->tx_completion(ar,
                                          compl->transfer_context,
                                          compl->transfer_id);
                        send_done = 1;
                } else {
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("Unable to post recv buffer for pipe: %d\n",
                                            compl->pipe_info->pipe_num);
                                break;
                        }

                        skb = (struct sk_buff *)compl->transfer_context;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                }

                compl->send_or_recv = HIF_CE_COMPLETE_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                compl->pipe_info->num_sends_allowed += send_done;
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                                u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

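/*
 * Allocate, DMA-map and enqueue num receive buffers on the given pipe.
 * On any failure the pipe's receive ring is cleaned up.
 */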
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ce_state *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("could not allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("could not dma map skbuff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("could not enqueue to pipe %d (%d)\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct hif_ce_pipe_info *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
                                    pipe_num);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_ce(ar);
        if (ret) {
                ath10k_warn("could not start CE (%d)\n", ret);
                return ret;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("could not post rx pipes (%d)\n", ret);
                return ret;
        }

        ar_pci->started = 1;
        return 0;
}

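/* Revoke and free any receive buffers still enqueued on this pipe. */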
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ce_state *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

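/*
 * Cancel sends still pending on this pipe and report the buffers back
 * to the upper layer as aborted.
 */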
static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ce_state *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                if (netbuf != CE_SENDLIST_ITEM_CTXT) {
                        /*
                         * Indicate the completion to higher layer to free
                         * the buffer
                         */
                        ATH10K_SKB_CB(netbuf)->is_aborted = true;
                        ar_pci->msg_callbacks_current.tx_completion(ar,
                                                                    netbuf,
                                                                    id);
                }
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                struct hif_ce_pipe_info *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct hif_ce_pipe_info *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
         * by ath10k_pci_start_intr(). */
        ath10k_pci_disable_irqs(ar);

        ath10k_pci_stop_ce(ar);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        ar_pci->started = 0;
}

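/*
 * Synchronous BMI request/response exchange over the BMI copy engines:
 * the request (and an optional response buffer) is DMA-mapped and sent,
 * then we wait up to BMI_COMMUNICATION_TIMEOUT_HZ for completion.
 */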
1291static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1292                                           void *req, u32 req_len,
1293                                           void *resp, u32 *resp_len)
1294{
1295        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1296        struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
1297        struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
1298        dma_addr_t req_paddr = 0;
1299        dma_addr_t resp_paddr = 0;
1300        struct bmi_xfer xfer = {};
1301        void *treq, *tresp = NULL;
1302        int ret = 0;
1303
1304        if (resp && !resp_len)
1305                return -EINVAL;
1306
1307        if (resp && resp_len && *resp_len == 0)
1308                return -EINVAL;
1309
1310        treq = kmemdup(req, req_len, GFP_KERNEL);
1311        if (!treq)
1312                return -ENOMEM;
1313
1314        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1315        ret = dma_mapping_error(ar->dev, req_paddr);
1316        if (ret)
1317                goto err_dma;
1318
1319        if (resp && resp_len) {
1320                tresp = kzalloc(*resp_len, GFP_KERNEL);
1321                if (!tresp) {
1322                        ret = -ENOMEM;
1323                        goto err_req;
1324                }
1325
1326                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1327                                            DMA_FROM_DEVICE);
1328                ret = dma_mapping_error(ar->dev, resp_paddr);
1329                if (ret)
1330                        goto err_req;
1331
1332                xfer.wait_for_resp = true;
1333                xfer.resp_len = 0;
1334
1335                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1336        }
1337
1338        init_completion(&xfer.done);
1339
1340        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1341        if (ret)
1342                goto err_resp;
1343
1344        ret = wait_for_completion_timeout(&xfer.done,
1345                                          BMI_COMMUNICATION_TIMEOUT_HZ);
1346        if (ret <= 0) {
1347                u32 unused_buffer;
1348                unsigned int unused_nbytes;
1349                unsigned int unused_id;
1350
1351                ret = -ETIMEDOUT;
1352                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1353                                           &unused_nbytes, &unused_id);
1354        } else {
1355                /* non-zero means we did not time out */
1356                ret = 0;
1357        }
1358
1359err_resp:
1360        if (resp) {
1361                u32 unused_buffer;
1362
1363                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1364                dma_unmap_single(ar->dev, resp_paddr,
1365                                 *resp_len, DMA_FROM_DEVICE);
1366        }
1367err_req:
1368        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1369
1370        if (ret == 0 && resp_len) {
1371                *resp_len = min(*resp_len, xfer.resp_len);
1372                memcpy(resp, tresp, xfer.resp_len);
1373        }
1374err_dma:
1375        kfree(treq);
1376        kfree(tresp);
1377
1378        return ret;
1379}
1380
1381static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
1382                                     void *transfer_context,
1383                                     u32 data,
1384                                     unsigned int nbytes,
1385                                     unsigned int transfer_id)
1386{
1387        struct bmi_xfer *xfer = transfer_context;
1388
1389        if (xfer->wait_for_resp)
1390                return;
1391
1392        complete(&xfer->done);
1393}
1394
1395static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
1396                                     void *transfer_context,
1397                                     u32 data,
1398                                     unsigned int nbytes,
1399                                     unsigned int transfer_id,
1400                                     unsigned int flags)
1401{
1402        struct bmi_xfer *xfer = transfer_context;
1403
1404        if (!xfer->wait_for_resp) {
1405                ath10k_warn("unexpected: BMI data received; ignoring\n");
1406                return;
1407        }
1408
1409        xfer->resp_len = nbytes;
1410        complete(&xfer->done);
1411}
1412
1413/*
1414 * Map from service/endpoint to Copy Engine.
1415 * This table is derived from the CE_PCI TABLE, above.
1416 * It is passed to the Target at startup for use by firmware.
1417 */
1418static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1419        {
1420                 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1421                 PIPEDIR_OUT,           /* out = UL = host -> target */
1422                 3,
1423        },
1424        {
1425                 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1426                 PIPEDIR_IN,            /* in = DL = target -> host */
1427                 2,
1428        },
1429        {
1430                 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1431                 PIPEDIR_OUT,           /* out = UL = host -> target */
1432                 3,
1433        },
1434        {
1435                 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1436                 PIPEDIR_IN,            /* in = DL = target -> host */
1437                 2,
1438        },
1439        {
1440                 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1441                 PIPEDIR_OUT,           /* out = UL = host -> target */
1442                 3,
1443        },
1444        {
1445                 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1446                 PIPEDIR_IN,            /* in = DL = target -> host */
1447                 2,
1448        },
1449        {
1450                 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1451                 PIPEDIR_OUT,           /* out = UL = host -> target */
1452                 3,
1453        },
1454        {
1455                 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1456                 PIPEDIR_IN,            /* in = DL = target -> host */
1457                 2,
1458        },
1459        {
1460                 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1461                 PIPEDIR_OUT,           /* out = UL = host -> target */
1462                 3,
1463        },
1464        {
1465                 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1466                 PIPEDIR_IN,            /* in = DL = target -> host */
1467                 2,
1468        },
1469        {
1470                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1471                 PIPEDIR_OUT,           /* out = UL = host -> target */
1472                 0,             /* could be moved to 3 (share with WMI) */
1473        },
1474        {
1475                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1476                 PIPEDIR_IN,            /* in = DL = target -> host */
1477                 1,
1478        },
1479        {
1480                 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1481                 PIPEDIR_OUT,           /* out = UL = host -> target */
1482                 0,
1483        },
1484        {
1485                 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1486                 PIPEDIR_IN,            /* in = DL = target -> host */
1487                 1,
1488        },
1489        {
1490                 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1491                 PIPEDIR_OUT,           /* out = UL = host -> target */
1492                 4,
1493        },
1494        {
1495                 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1496                 PIPEDIR_IN,            /* in = DL = target -> host */
1497                 1,
1498        },
1499
1500        /* (Additions here) */
1501
1502        {                               /* Must be last */
1503                 0,
1504                 0,
1505                 0,
1506        },
1507};
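
/*
 * For illustration only: a host-side lookup over the table above might
 * look like the sketch below.  The field names (service_id, pipedir,
 * pipenum) are assumed from the entry layout of struct service_to_pipe;
 * the driver's real mapping lives in
 * ath10k_pci_hif_map_service_to_pipe().
 *
 *	static int example_service_to_pipe(u32 service_id, u32 dir,
 *					   u32 *pipenum)
 *	{
 *		const struct service_to_pipe *e;
 *
 *		for (e = target_service_to_ce_map_wlan; e->service_id; e++) {
 *			if (e->service_id == service_id && e->pipedir == dir) {
 *				*pipenum = e->pipenum;
 *				return 0;
 *			}
 *		}
 *
 *		return -ENOENT;	// terminating { 0, 0, 0 } entry reached
 *	}
 */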
1508
1509/*
1510 * Send an interrupt to the device to wake up the Target CPU
1511 * so it has an opportunity to notice any changed state.
1512 */
1513static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1514{
1515        int ret;
1516        u32 core_ctrl;
1517
1518        ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1519                                              CORE_CTRL_ADDRESS,
1520                                          &core_ctrl);
1521        if (ret) {
1522                ath10k_warn("Unable to read core ctrl\n");
1523                return ret;
1524        }
1525
1526        /* A_INUM_FIRMWARE interrupt to Target CPU */
1527        core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1528
1529        ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1530                                               CORE_CTRL_ADDRESS,
1531                                           core_ctrl);
1532        if (ret)
1533                ath10k_warn("Unable to set interrupt mask\n");
1534
1535        return ret;
1536}
1537
1538static int ath10k_pci_init_config(struct ath10k *ar)
1539{
1540        u32 interconnect_targ_addr;
1541        u32 pcie_state_targ_addr = 0;
1542        u32 pipe_cfg_targ_addr = 0;
1543        u32 svc_to_pipe_map = 0;
1544        u32 pcie_config_flags = 0;
1545        u32 ealloc_value;
1546        u32 ealloc_targ_addr;
1547        u32 flag2_value;
1548        u32 flag2_targ_addr;
1549        int ret = 0;
1550
1551        /* Download to Target the CE Config and the service-to-CE map */
1552        interconnect_targ_addr =
1553                host_interest_item_address(HI_ITEM(hi_interconnect_state));
1554
1555        /* Supply Target-side CE configuration */
1556        ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1557                                          &pcie_state_targ_addr);
1558        if (ret != 0) {
1559                ath10k_err("Failed to get pcie state addr: %d\n", ret);
1560                return ret;
1561        }
1562
1563        if (pcie_state_targ_addr == 0) {
1564                ret = -EIO;
1565                ath10k_err("Invalid pcie state addr\n");
1566                return ret;
1567        }
1568
1569        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1570                                          offsetof(struct pcie_state,
1571                                                   pipe_cfg_addr),
1572                                          &pipe_cfg_targ_addr);
1573        if (ret != 0) {
1574                ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1575                return ret;
1576        }
1577
1578        if (pipe_cfg_targ_addr == 0) {
1579                ret = -EIO;
1580                ath10k_err("Invalid pipe cfg addr\n");
1581                return ret;
1582        }
1583
1584        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1585                                 target_ce_config_wlan,
1586                                 sizeof(target_ce_config_wlan));
1587
1588        if (ret != 0) {
1589                ath10k_err("Failed to write pipe cfg: %d\n", ret);
1590                return ret;
1591        }
1592
1593        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1594                                          offsetof(struct pcie_state,
1595                                                   svc_to_pipe_map),
1596                                          &svc_to_pipe_map);
1597        if (ret != 0) {
1598                ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1599                return ret;
1600        }
1601
1602        if (svc_to_pipe_map == 0) {
1603                ret = -EIO;
1604                ath10k_err("Invalid svc_to_pipe map\n");
1605                return ret;
1606        }
1607
1608        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1609                                 target_service_to_ce_map_wlan,
1610                                 sizeof(target_service_to_ce_map_wlan));
1611        if (ret != 0) {
1612                ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1613                return ret;
1614        }
1615
1616        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1617                                          offsetof(struct pcie_state,
1618                                                   config_flags),
1619                                          &pcie_config_flags);
1620        if (ret != 0) {
1621                ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1622                return ret;
1623        }
1624
1625        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1626
1627        ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1628                                 offsetof(struct pcie_state, config_flags),
1629                                 &pcie_config_flags,
1630                                 sizeof(pcie_config_flags));
1631        if (ret != 0) {
1632                ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1633                return ret;
1634        }
1635
1636        /* configure early allocation */
1637        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1638
1639        ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1640        if (ret != 0) {
1641                ath10k_err("Failed to get early alloc val: %d\n", ret);
1642                return ret;
1643        }
1644
1645        /* first bank is switched to IRAM */
1646        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1647                         HI_EARLY_ALLOC_MAGIC_MASK);
1648        ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1649                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1650
1651        ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1652        if (ret != 0) {
1653                ath10k_err("Failed to set early alloc val: %d\n", ret);
1654                return ret;
1655        }
1656
1657        /* Tell Target to proceed with initialization */
1658        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1659
1660        ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1661        if (ret != 0) {
1662                ath10k_err("Failed to get option val: %d\n", ret);
1663                return ret;
1664        }
1665
1666        flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1667
1668        ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1669        if (ret != 0) {
1670                ath10k_err("Failed to set option val: %d\n", ret);
1671                return ret;
1672        }
1673
1674        return 0;
1675}
1676
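/*
 * Each host_interest update above follows the same read-modify-write
 * pattern over the diagnostic window.  A sketch of a helper that would
 * factor it out (illustrative only; no such helper exists in this
 * driver):
 *
 *	static int example_diag_rmw(struct ath10k *ar, u32 addr,
 *				    u32 set, u32 clear)
 *	{
 *		u32 val;
 *		int ret;
 *
 *		ret = ath10k_pci_diag_read_access(ar, addr, &val);
 *		if (ret)
 *			return ret;
 *
 *		val &= ~clear;
 *		val |= set;
 *
 *		return ath10k_pci_diag_write_access(ar, addr, val);
 *	}
 *
 * Setting HI_OPTION_EARLY_CFG_DONE above would then be
 * example_diag_rmw(ar, flag2_targ_addr, HI_OPTION_EARLY_CFG_DONE, 0).
 */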
1677
1678
1679static int ath10k_pci_ce_init(struct ath10k *ar)
1680{
1681        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1682        struct hif_ce_pipe_info *pipe_info;
1683        const struct ce_attr *attr;
1684        int pipe_num;
1685
1686        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1687                pipe_info = &ar_pci->pipe_info[pipe_num];
1688                pipe_info->pipe_num = pipe_num;
1689                pipe_info->hif_ce_state = ar;
1690                attr = &host_ce_config_wlan[pipe_num];
1691
1692                pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1693                if (pipe_info->ce_hdl == NULL) {
1694                        ath10k_err("Unable to initialize CE for pipe: %d\n",
1695                                   pipe_num);
1696
1697                        /* Safe to call here: ce_deinit checks each
1698                         * pipe's ce_hdl before tearing it down */
1699                        ath10k_pci_ce_deinit(ar);
1700                        return -ENOMEM;
1701                }
1702
1703                if (pipe_num == ar_pci->ce_count - 1) {
1704                        /*
1705                         * Reserve the last CE for
1706                         * diagnostic window support.
1707                         */
1708                        ar_pci->ce_diag = pipe_info->ce_hdl;
1710                        continue;
1711                }
1712
1713                pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1714        }
1715
1716        /*
1717         * Initially, establish CE completion handlers for use with BMI.
1718         * These are overwritten with generic handlers after we exit BMI phase.
1719         */
1720        pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1721        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1722                                   ath10k_pci_bmi_send_done, 0);
1723
1724        pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1725        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1726                                   ath10k_pci_bmi_recv_data);
1727
1728        return 0;
1729}
1730
1731static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1732{
1733        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1734        u32 fw_indicator_address, fw_indicator;
1735
1736        ath10k_pci_wake(ar);
1737
1738        fw_indicator_address = ar_pci->fw_indicator_address;
1739        fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1740
1741        if (fw_indicator & FW_IND_EVENT_PENDING) {
1742                /* ACK: clear Target-side pending event */
1743                ath10k_pci_write32(ar, fw_indicator_address,
1744                                   fw_indicator & ~FW_IND_EVENT_PENDING);
1745
1746                if (ar_pci->started) {
1747                        ath10k_pci_hif_dump_area(ar);
1748                } else {
1749                        /*
1750                         * Probable Target failure before we're prepared
1751                         * to handle it.  Generally unexpected.
1752                         */
1753                        ath10k_warn("early firmware event indicated\n");
1754                }
1755        }
1756
1757        ath10k_pci_sleep(ar);
1758}
1759
1760static int ath10k_pci_hif_power_up(struct ath10k *ar)
1761{
1762        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1763        int ret;
1764
1765        ret = ath10k_pci_start_intr(ar);
1766        if (ret) {
1767                ath10k_err("could not start interrupt handling (%d)\n", ret);
1768                goto err;
1769        }
1770
1771        /*
1772         * Bring the target up cleanly.
1773         *
1774         * The target may be in an undefined state with an AUX-powered Target
1775         * and a Host in WoW mode. If the Host crashes, loses power, or is
1776         * restarted (without unloading the driver) then the Target is left
1777         * (aux) powered and running. On a subsequent driver load, the Target
1778         * is in an unexpected state. We try to catch that here in order to
1779         * reset the Target and retry the probe.
1780         */
1781        ath10k_pci_device_reset(ar);
1782
1783        ret = ath10k_pci_reset_target(ar);
1784        if (ret)
1785                goto err_irq;
1786
1787        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1788                /* Force AWAKE forever */
1789                ath10k_do_pci_wake(ar);
1790
1791        ret = ath10k_pci_ce_init(ar);
1792        if (ret)
1793                goto err_ps;
1794
1795        ret = ath10k_pci_init_config(ar);
1796        if (ret)
1797                goto err_ce;
1798
1799        ret = ath10k_pci_wake_target_cpu(ar);
1800        if (ret) {
1801                ath10k_err("could not wake up target CPU (%d)\n", ret);
1802                goto err_ce;
1803        }
1804
1805        return 0;
1806
1807err_ce:
1808        ath10k_pci_ce_deinit(ar);
1809err_ps:
1810        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1811                ath10k_do_pci_sleep(ar);
1812err_irq:
1813        ath10k_pci_stop_intr(ar);
1814err:
1815        return ret;
1816}
1817
1818static void ath10k_pci_hif_power_down(struct ath10k *ar)
1819{
1820        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1821
1822        ath10k_pci_stop_intr(ar);
1823
1824        ath10k_pci_ce_deinit(ar);
1825        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1826                ath10k_do_pci_sleep(ar);
1827}
1828
1829#ifdef CONFIG_PM
1830
1831#define ATH10K_PCI_PM_CONTROL 0x44
1832
1833static int ath10k_pci_hif_suspend(struct ath10k *ar)
1834{
1835        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1836        struct pci_dev *pdev = ar_pci->pdev;
1837        u32 val;
1838
1839        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1840
1841        if ((val & 0x000000ff) != 0x3) {
1842                pci_save_state(pdev);
1843                pci_disable_device(pdev);
1844                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1845                                       (val & 0xffffff00) | 0x03);
1846        }
1847
1848        return 0;
1849}
1850
1851static int ath10k_pci_hif_resume(struct ath10k *ar)
1852{
1853        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1854        struct pci_dev *pdev = ar_pci->pdev;
1855        u32 val;
1856
1857        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1858
1859        if ((val & 0x000000ff) != 0) {
1860                pci_restore_state(pdev);
1861                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1862                                       val & 0xffffff00);
1863                /*
1864                 * Suspend/Resume resets the PCI configuration space,
1865                 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1866                 * to keep PCI Tx retries from interfering with C3 CPU state
1867                 */
1868                pci_read_config_dword(pdev, 0x40, &val);
1869
1870                if ((val & 0x0000ff00) != 0)
1871                        pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1872        }
1873
1874        return 0;
1875}
1876#endif
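
/*
 * Note on the suspend/resume pair above: offset 0x44
 * (ATH10K_PCI_PM_CONTROL) is the device's PCI power management
 * control/status register, whose low two bits select the power state
 * (0x0 = D0, 0x3 = D3hot).  Suspend writes (val & 0xffffff00) | 0x03 to
 * park the device in D3hot; resume clears those bits to return to D0.
 */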
1877
1878static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1879        .send_head              = ath10k_pci_hif_send_head,
1880        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1881        .start                  = ath10k_pci_hif_start,
1882        .stop                   = ath10k_pci_hif_stop,
1883        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1884        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1885        .send_complete_check    = ath10k_pci_hif_send_complete_check,
1886        .set_callbacks          = ath10k_pci_hif_set_callbacks,
1887        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1888        .power_up               = ath10k_pci_hif_power_up,
1889        .power_down             = ath10k_pci_hif_power_down,
1890#ifdef CONFIG_PM
1891        .suspend                = ath10k_pci_hif_suspend,
1892        .resume                 = ath10k_pci_hif_resume,
1893#endif
1894};
1895
1896static void ath10k_pci_ce_tasklet(unsigned long ptr)
1897{
1898        struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
1899        struct ath10k_pci *ar_pci = pipe->ar_pci;
1900
1901        ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1902}
1903
1904static void ath10k_msi_err_tasklet(unsigned long data)
1905{
1906        struct ath10k *ar = (struct ath10k *)data;
1907
1908        ath10k_pci_fw_interrupt_handler(ar);
1909}
1910
1911/*
1912 * Handler for a per-engine interrupt on a PARTICULAR CE.
1913 * This is used in cases where each CE has a private MSI interrupt.
1914 */
1915static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1916{
1917        struct ath10k *ar = arg;
1918        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1919        int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1920
1921        if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1922                ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1923                return IRQ_HANDLED;
1924        }
1925
1926        /*
1927         * NOTE: We are able to derive ce_id from irq because we
1928         * use a one-to-one mapping for CE's 0..5.
1929         * CE's 6 & 7 do not use interrupts at all.
1930         *
1931         * This mapping must be kept in sync with the mapping
1932         * used by firmware.
1933         */
1934        tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1935        return IRQ_HANDLED;
1936}
1937
1938static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1939{
1940        struct ath10k *ar = arg;
1941        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1942
1943        tasklet_schedule(&ar_pci->msi_fw_err);
1944        return IRQ_HANDLED;
1945}
1946
1947/*
1948 * Top-level interrupt handler for all PCI interrupts from a Target.
1949 * When a block of MSI interrupts is allocated, this top-level handler
1950 * is not used; instead, we directly call the correct sub-handler.
1951 */
1952static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
1953{
1954        struct ath10k *ar = arg;
1955        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1956
1957        if (ar_pci->num_msi_intrs == 0) {
1958                /*
1959                 * IMPORTANT: INTR_CLR register has to be set after
1960                 * INTR_ENABLE is set to 0, otherwise the interrupt
1961                 * cannot be properly cleared.
1962                 */
1963                iowrite32(0, ar_pci->mem +
1964                          (SOC_CORE_BASE_ADDRESS |
1965                           PCIE_INTR_ENABLE_ADDRESS));
1966                iowrite32(PCIE_INTR_FIRMWARE_MASK |
1967                          PCIE_INTR_CE_MASK_ALL,
1968                          ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1969                                         PCIE_INTR_CLR_ADDRESS));
1970                /*
1971                 * IMPORTANT: this extra read transaction is required to
1972                 * flush the posted write buffer.
1973                 */
1974                (void) ioread32(ar_pci->mem +
1975                                (SOC_CORE_BASE_ADDRESS |
1976                                 PCIE_INTR_ENABLE_ADDRESS));
1977        }
1978
1979        tasklet_schedule(&ar_pci->intr_tq);
1980
1981        return IRQ_HANDLED;
1982}
1983
1984static void ath10k_pci_tasklet(unsigned long data)
1985{
1986        struct ath10k *ar = (struct ath10k *)data;
1987        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1988
1989        ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
1990        ath10k_ce_per_engine_service_any(ar);
1991
1992        if (ar_pci->num_msi_intrs == 0) {
1993                /* Enable Legacy PCI line interrupts */
1994                iowrite32(PCIE_INTR_FIRMWARE_MASK |
1995                          PCIE_INTR_CE_MASK_ALL,
1996                          ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1997                                         PCIE_INTR_ENABLE_ADDRESS));
1998                /*
1999                 * IMPORTANT: this extra read transaction is required to
2000                 * flush the posted write buffer
2001                 */
2002                (void) ioread32(ar_pci->mem +
2003                                (SOC_CORE_BASE_ADDRESS |
2004                                 PCIE_INTR_ENABLE_ADDRESS));
2005        }
2006}
2007
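/*
 * Despite the "msix" in its name, the function below requests a block
 * of multi-vector MSI interrupts via pci_enable_msi_block(): vector
 * MSI_ASSIGN_FW is routed to the firmware-error handler and vectors
 * MSI_ASSIGN_CE_INITIAL..MSI_ASSIGN_CE_MAX to the per-CE handler.
 */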
2008static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2009{
2010        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2011        int ret;
2012        int i;
2013
2014        ret = pci_enable_msi_block(ar_pci->pdev, num);
2015        if (ret)
2016                return ret;
2017
2018        ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2019                          ath10k_pci_msi_fw_handler,
2020                          IRQF_SHARED, "ath10k_pci", ar);
2021        if (ret) {
2022                ath10k_warn("request_irq(%d) failed %d\n",
2023                            ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2024
2025                pci_disable_msi(ar_pci->pdev);
2026                return ret;
2027        }
2028
2029        for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2030                ret = request_irq(ar_pci->pdev->irq + i,
2031                                  ath10k_pci_per_engine_handler,
2032                                  IRQF_SHARED, "ath10k_pci", ar);
2033                if (ret) {
2034                        ath10k_warn("request_irq(%d) failed %d\n",
2035                                    ar_pci->pdev->irq + i, ret);
2036
2037                        for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2038                                free_irq(ar_pci->pdev->irq + i, ar);
2039
2040                        free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2041                        pci_disable_msi(ar_pci->pdev);
2042                        return ret;
2043                }
2044        }
2045
2046        ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2047        return 0;
2048}
2049
2050static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2051{
2052        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2053        int ret;
2054
2055        ret = pci_enable_msi(ar_pci->pdev);
2056        if (ret < 0)
2057                return ret;
2058
2059        ret = request_irq(ar_pci->pdev->irq,
2060                          ath10k_pci_interrupt_handler,
2061                          IRQF_SHARED, "ath10k_pci", ar);
2062        if (ret < 0) {
2063                pci_disable_msi(ar_pci->pdev);
2064                return ret;
2065        }
2066
2067        ath10k_info("MSI interrupt handling\n");
2068        return 0;
2069}
2070
2071static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2072{
2073        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2074        int ret;
2075
2076        ret = request_irq(ar_pci->pdev->irq,
2077                          ath10k_pci_interrupt_handler,
2078                          IRQF_SHARED, "ath10k_pci", ar);
2079        if (ret < 0)
2080                return ret;
2081
2082        /*
2083         * Make sure to wake the Target before enabling Legacy
2084         * Interrupt.
2085         */
2086        iowrite32(PCIE_SOC_WAKE_V_MASK,
2087                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2088                  PCIE_SOC_WAKE_ADDRESS);
2089
2090        ath10k_pci_wait(ar);
2091
2092        /*
2093         * A potential race occurs here: The CORE_BASE write
2094         * depends on target correctly decoding AXI address but
2095         * host won't know when target writes BAR to CORE_CTRL.
2096         * This write might get lost if target has NOT written BAR.
2097         * For now, fix the race by repeating the write in below
2098         * synchronization checking.
2099         */
2100        iowrite32(PCIE_INTR_FIRMWARE_MASK |
2101                  PCIE_INTR_CE_MASK_ALL,
2102                  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2103                                 PCIE_INTR_ENABLE_ADDRESS));
2104        iowrite32(PCIE_SOC_WAKE_RESET,
2105                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2106                  PCIE_SOC_WAKE_ADDRESS);
2107
2108        ath10k_info("legacy interrupt handling\n");
2109        return 0;
2110}
2111
2112static int ath10k_pci_start_intr(struct ath10k *ar)
2113{
2114        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2115        int num = MSI_NUM_REQUEST;
2116        int ret;
2117        int i;
2118
2119        tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2120        tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2121                     (unsigned long) ar);
2122
2123        for (i = 0; i < CE_COUNT; i++) {
2124                ar_pci->pipe_info[i].ar_pci = ar_pci;
2125                tasklet_init(&ar_pci->pipe_info[i].intr,
2126                             ath10k_pci_ce_tasklet,
2127                             (unsigned long)&ar_pci->pipe_info[i]);
2128        }
2129
2130        if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2131                num = 1;
2132
2133        if (num > 1) {
2134                ret = ath10k_pci_start_intr_msix(ar, num);
2135                if (ret == 0)
2136                        goto exit;
2137
2138                ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2139                num = 1;
2140        }
2141
2142        if (num == 1) {
2143                ret = ath10k_pci_start_intr_msi(ar);
2144                if (ret == 0)
2145                        goto exit;
2146
2147                ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2148                            ret);
2149                num = 0;
2150        }
2151
2152        ret = ath10k_pci_start_intr_legacy(ar);
2153
2154exit:
2155        ar_pci->num_msi_intrs = num;
2156        ar_pci->ce_count = CE_COUNT;
2157        return ret;
2158}
2159
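/*
 * Interrupt setup thus degrades gracefully: multi-vector MSI (when the
 * MSI_X feature bit is set) -> single MSI -> legacy INTx.
 * ar_pci->num_msi_intrs records the outcome (0 meaning legacy), and the
 * teardown below relies on it to free the right number of vectors.
 */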
2160static void ath10k_pci_stop_intr(struct ath10k *ar)
2161{
2162        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2163        int i;
2164
2165        /* There's at least one interrupt regardless of whether it's
2166         * legacy INTR, MSI or MSI-X */
2167        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2168                free_irq(ar_pci->pdev->irq + i, ar);
2169
2170        if (ar_pci->num_msi_intrs > 0)
2171                pci_disable_msi(ar_pci->pdev);
2172}
2173
2174static int ath10k_pci_reset_target(struct ath10k *ar)
2175{
2176        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2177        int wait_limit = 300; /* 3 sec */
2178
2179        /* Wait for Target to finish initialization before we proceed. */
2180        iowrite32(PCIE_SOC_WAKE_V_MASK,
2181                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2182                  PCIE_SOC_WAKE_ADDRESS);
2183
2184        ath10k_pci_wait(ar);
2185
2186        while (wait_limit-- &&
2187               !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2188                 FW_IND_INITIALIZED)) {
2189                if (ar_pci->num_msi_intrs == 0)
2190                        /* Fix potential race by repeating CORE_BASE writes */
2191                        iowrite32(PCIE_INTR_FIRMWARE_MASK |
2192                                  PCIE_INTR_CE_MASK_ALL,
2193                                  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2194                                                 PCIE_INTR_ENABLE_ADDRESS));
2195                mdelay(10);
2196        }
2197
2198        if (wait_limit < 0) {
2199                ath10k_err("Target stalled\n");
2200                iowrite32(PCIE_SOC_WAKE_RESET,
2201                          ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2202                          PCIE_SOC_WAKE_ADDRESS);
2203                return -EIO;
2204        }
2205
2206        iowrite32(PCIE_SOC_WAKE_RESET,
2207                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2208                  PCIE_SOC_WAKE_ADDRESS);
2209
2210        return 0;
2211}
2212
2213static void ath10k_pci_device_reset(struct ath10k *ar)
2214{
2215        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2216        void __iomem *mem = ar_pci->mem;
2217        int i;
2218        u32 val;
2219
2220        if (!SOC_GLOBAL_RESET_ADDRESS)
2221                return;
2222
2223        if (!mem)
2224                return;
2225
2226        ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
2227                               PCIE_SOC_WAKE_V_MASK);
2228        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2229                if (ath10k_pci_target_is_awake(ar))
2230                        break;
2231                msleep(1);
2232        }
2233
2234        /* Put Target, including PCIe, into RESET. */
2235        val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
2236        val |= 1;
2237        ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2238
2239        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2240                if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2241                                          RTC_STATE_COLD_RESET_MASK)
2242                        break;
2243                msleep(1);
2244        }
2245
2246        /* Pull Target, including PCIe, out of RESET. */
2247        val &= ~1;
2248        ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2249
2250        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2251                if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2252                                            RTC_STATE_COLD_RESET_MASK))
2253                        break;
2254                msleep(1);
2255        }
2256
2257        ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2258}
2259
2260static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2261{
2262        int i;
2263
2264        for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2265                if (!test_bit(i, ar_pci->features))
2266                        continue;
2267
2268                switch (i) {
2269                case ATH10K_PCI_FEATURE_MSI_X:
2270                        ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2271                        break;
2272                case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
2273                        ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
2274                        break;
2275                case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2276                        ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
2277                        break;
2278                }
2279        }
2280}
2281
2282static int ath10k_pci_probe(struct pci_dev *pdev,
2283                            const struct pci_device_id *pci_dev)
2284{
2285        void __iomem *mem;
2286        int ret = 0;
2287        struct ath10k *ar;
2288        struct ath10k_pci *ar_pci;
2289        u32 lcr_val;
2290
2291        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2292
2293        ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2294        if (ar_pci == NULL)
2295                return -ENOMEM;
2296
2297        ar_pci->pdev = pdev;
2298        ar_pci->dev = &pdev->dev;
2299
2300        switch (pci_dev->device) {
2301        case QCA988X_1_0_DEVICE_ID:
2302                set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
2303                break;
2304        case QCA988X_2_0_DEVICE_ID:
2305                set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2306                break;
2307        default:
2308                ret = -ENODEV;
2309                ath10k_err("Unknown device ID: 0x%04x\n", pci_dev->device);
2310                goto err_ar_pci;
2311        }
2312
2313        if (ath10k_target_ps)
2314                set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2315
2316        ath10k_pci_dump_features(ar_pci);
2317
2318        ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2319        if (!ar) {
2320                ath10k_err("ath10k_core_create failed!\n");
2321                ret = -EINVAL;
2322                goto err_ar_pci;
2323        }
2324
2325        /* Enable QCA988X_1.0 HW workarounds */
2326        if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
2327                spin_lock_init(&ar_pci->hw_v1_workaround_lock);
2328
2329        ar_pci->ar = ar;
2330        ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2331        atomic_set(&ar_pci->keep_awake_count, 0);
2332
2333        pci_set_drvdata(pdev, ar);
2334
2335        /*
2336         * Without any knowledge of the Host, the Target may have been reset or
2337         * power cycled and its Config Space may no longer reflect the PCI
2338         * address space that was assigned earlier by the PCI infrastructure.
2339         * Refresh it now.
2340         */
2341        ret = pci_assign_resource(pdev, BAR_NUM);
2342        if (ret) {
2343                ath10k_err("cannot assign PCI space: %d\n", ret);
2344                goto err_ar;
2345        }
2346
2347        ret = pci_enable_device(pdev);
2348        if (ret) {
2349                ath10k_err("cannot enable PCI device: %d\n", ret);
2350                goto err_ar;
2351        }
2352
2353        /* Request MMIO resources */
2354        ret = pci_request_region(pdev, BAR_NUM, "ath");
2355        if (ret) {
2356                ath10k_err("PCI MMIO reservation error: %d\n", ret);
2357                goto err_device;
2358        }
2359
2360        /*
2361         * Target structures have a limit of 32 bit DMA pointers.
2362         * DMA pointers can be wider than 32 bits by default on some systems.
2363         */
2364        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2365        if (ret) {
2366                ath10k_err("32-bit DMA not available: %d\n", ret);
2367                goto err_region;
2368        }
2369
2370        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2371        if (ret) {
2372                ath10k_err("cannot enable 32-bit consistent DMA: %d\n", ret);
2373                goto err_region;
2374        }
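
/*
 * (On newer kernels the two mask calls above can be collapsed into a
 * single dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)).)
 */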
2375
2376        /* Set bus master bit in PCI_COMMAND to enable DMA */
2377        pci_set_master(pdev);
2378
2379        /*
2380         * Temporary FIX: disable ASPM
2381         * Will be removed after the OTP is programmed
2382         */
2383        pci_read_config_dword(pdev, 0x80, &lcr_val);
2384        pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2385
2386        /* Arrange for access to Target SoC registers. */
2387        mem = pci_iomap(pdev, BAR_NUM, 0);
2388        if (!mem) {
2389                ath10k_err("PCI iomap error\n");
2390                ret = -EIO;
2391                goto err_master;
2392        }
2393
2394        ar_pci->mem = mem;
2395
2396        spin_lock_init(&ar_pci->ce_lock);
2397
2398        ar_pci->cacheline_sz = dma_get_cache_alignment();
2399
2400        ret = ath10k_core_register(ar);
2401        if (ret) {
2402                ath10k_err("could not register driver core (%d)\n", ret);
2403                goto err_iomap;
2404        }
2405
2406        return 0;
2407
2408err_iomap:
2409        pci_iounmap(pdev, mem);
2410err_master:
2411        pci_clear_master(pdev);
2412err_region:
2413        pci_release_region(pdev, BAR_NUM);
2414err_device:
2415        pci_disable_device(pdev);
2416err_ar:
2417        pci_set_drvdata(pdev, NULL);
2418        ath10k_core_destroy(ar);
2419err_ar_pci:
2420        /* call HIF PCI free here */
2421        kfree(ar_pci);
2422
2423        return ret;
2424}
2425
2426static void ath10k_pci_remove(struct pci_dev *pdev)
2427{
2428        struct ath10k *ar = pci_get_drvdata(pdev);
2429        struct ath10k_pci *ar_pci;
2430
2431        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2432
2433        if (!ar)
2434                return;
2435
2436        ar_pci = ath10k_pci_priv(ar);
2437
2438        if (!ar_pci)
2439                return;
2440
2441        tasklet_kill(&ar_pci->msi_fw_err);
2442
2443        ath10k_core_unregister(ar);
2444
2445        pci_set_drvdata(pdev, NULL);
2446        pci_iounmap(pdev, ar_pci->mem);
2447        pci_release_region(pdev, BAR_NUM);
2448        pci_clear_master(pdev);
2449        pci_disable_device(pdev);
2450
2451        ath10k_core_destroy(ar);
2452        kfree(ar_pci);
2453}
2454
2455MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2456
2457static struct pci_driver ath10k_pci_driver = {
2458        .name = "ath10k_pci",
2459        .id_table = ath10k_pci_id_table,
2460        .probe = ath10k_pci_probe,
2461        .remove = ath10k_pci_remove,
2462};
2463
2464static int __init ath10k_pci_init(void)
2465{
2466        int ret;
2467
2468        ret = pci_register_driver(&ath10k_pci_driver);
2469        if (ret)
2470                ath10k_err("pci_register_driver failed [%d]\n", ret);
2471
2472        return ret;
2473}
2474module_init(ath10k_pci_init);
2475
2476static void __exit ath10k_pci_exit(void)
2477{
2478        pci_unregister_driver(&ath10k_pci_driver);
2479}
2480
2481module_exit(ath10k_pci_exit);
2482
2483MODULE_AUTHOR("Qualcomm Atheros");
2484MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2485MODULE_LICENSE("Dual BSD/GPL");
2486MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
2487MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
2488MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
2489MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2490MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2491MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2492