linux/drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_AUTO = 0,
        ATH10K_PCI_IRQ_LEGACY = 1,
        ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
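
/*
 * Usage sketch (assuming the module is loaded as ath10k_pci): both
 * parameters can be set at load time, e.g. to force MSI interrupts and
 * restrict resets to warm resets only:
 *
 *   modprobe ath10k_pci irq_mode=2 reset_mode=1
 *
 * With 0644 permissions they are also visible at runtime under
 * /sys/module/ath10k_pci/parameters/.
 */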

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = __cpu_to_le32(0),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = __cpu_to_le32(1),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(512),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE2: target->host WMI */
        {
                .pipenum = __cpu_to_le32(2),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE3: host->target WMI */
        {
                .pipenum = __cpu_to_le32(3),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE4: host->target HTT */
        {
                .pipenum = __cpu_to_le32(4),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(256),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = __cpu_to_le32(5),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = __cpu_to_le32(6),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(4096),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE7 used only by Host */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(4),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },

        /* (Additions here) */

        { /* must be last */
                __cpu_to_le32(0),
                __cpu_to_le32(0),
                __cpu_to_le32(0),
        },
};
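
/*
 * Example: a lookup of ATH10K_HTC_SVC_ID_WMI_CONTROL in this map yields
 * pipe 3 for PIPEDIR_OUT (host->target) and pipe 2 for PIPEDIR_IN
 * (target->host); ath10k_pci_hif_map_service_to_pipe() below performs
 * exactly this walk.
 */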

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: INTR_CLR register has to be set after
         * INTR_ENABLE is set to 0, otherwise interrupt can not be
         * really cleared. */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs > 1)
                return "msi-x";

        if (ar_pci->num_msi_intrs == 1)
                return "msi";

        return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret;

        lockdep_assert_held(&ar_pci->ce_lock);

        skb = dev_alloc_skb(pipe->buf_sz);
        if (!skb)
                return -ENOMEM;

        WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

        paddr = dma_map_single(ar->dev, skb->data,
                               skb->len + skb_tailroom(skb),
                               DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ar->dev, paddr))) {
                ath10k_warn(ar, "failed to dma map pci rx buf\n");
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        ATH10K_SKB_CB(skb)->paddr = paddr;

        ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
        if (ret) {
                ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        int ret, num;

        lockdep_assert_held(&ar_pci->ce_lock);

        if (pipe->buf_sz == 0)
                return;

        if (!ce_pipe->dest_ring)
                return;

        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
        while (num--) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
                        ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                        mod_timer(&ar_pci->rx_post_retry, jiffies +
                                  ATH10K_PCI_RX_POST_RETRY_MS);
                        break;
                }
        }
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        __ath10k_pci_rx_post_pipe(pipe);
        spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_post(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        spin_lock_bh(&ar_pci->ce_lock);
        for (i = 0; i < CE_COUNT; i++)
                __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
        spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
        struct ath10k *ar = (void *)ptr;

        ath10k_pci_rx_post(ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32)address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0)
                memcpy(data, data_buf, orig_nbytes);
        else
                ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
                            address, ret);

        if (data_buf)
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);

        return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
        __le32 val = 0;
        int ret;

        ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
        *value = __le32_to_cpu(val);

        return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
                                     u32 src, u32 len)
{
        u32 host_addr, addr;
        int ret;

        host_addr = host_interest_item_address(src);

        ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
        if (ret != 0) {
                ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
                            src, ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
        if (ret != 0) {
                ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
                            addr, len, ret);
                return ret;
        }

        return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)             \
        __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
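
/*
 * Usage sketch: ath10k_pci_dump_registers() below uses this helper to
 * pull the firmware failure-state area:
 *
 *   ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
 *                                 hi_failure_state,
 *                                 REG_DUMP_COUNT_QCA988X * sizeof(__le32));
 */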

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        memcpy(data_buf, data, orig_nbytes);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);
        }

        if (ret != 0)
                ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
                            address, ret);

        return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
        __le32 val = __cpu_to_le32(value);

        return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
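
/*
 * Usage sketch: ath10k_pci_init_config() below patches host-interest
 * words with these helpers, e.g. to tell the target that early
 * configuration is done:
 *
 *   flag2_value |= HI_OPTION_EARLY_CFG_DONE;
 *   ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
 */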

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
        u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);

        return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
        int tot_delay = 0;
        int curr_delay = 5;

        while (tot_delay < PCIE_WAKE_TIMEOUT) {
                if (ath10k_pci_is_awake(ar))
                        return 0;

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }

        return -ETIMEDOUT;
}

static int ath10k_pci_wake(struct ath10k *ar)
{
        ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_V_MASK);
        return ath10k_pci_wake_wait(ar);
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
        ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_RESET);
}
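
/*
 * A minimal usage sketch (assumed pattern, not a verbatim caller): wake
 * the chip before touching its registers, then let it sleep again:
 *
 *   ret = ath10k_pci_wake(ar);
 *   if (ret)
 *           return ret;
 *   val = ath10k_pci_read32(ar, addr);
 *   ath10k_pci_sleep(ar);
 */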

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (transfer_context == NULL)
                        continue;

                cb->tx_completion(ar, transfer_context, transfer_id);
        }
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes, max_nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                cb->rx_completion(ar, skb, pipe_info->pipe_num);
        }

        ath10k_pci_rx_post_pipe(pipe_info);
}

static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                                struct ath10k_hif_sg_item *items, int n_items)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int err, i = 0;

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) < n_items)) {
                err = -ENOBUFS;
                goto err;
        }

        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ar, ATH10K_DBG_PCI,
                           "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                           i, items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                                items[i].vaddr, items[i].len);

                err = ath10k_ce_send_nolock(ce_pipe,
                                            items[i].transfer_context,
                                            items[i].paddr,
                                            items[i].len,
                                            items[i].transfer_id,
                                            CE_SEND_FLAG_GATHER);
                if (err)
                        goto err;
        }

        /* `i` equals `n_items - 1` after the loop */

        ath10k_dbg(ar, ATH10K_DBG_PCI,
                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                   i, items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                        items[i].vaddr, items[i].len);

        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    0);
        if (err)
                goto err;

        spin_unlock_bh(&ar_pci->ce_lock);
        return 0;

err:
        for (; i > 0; i--)
                __ath10k_ce_send_revert(ce_pipe);

        spin_unlock_bh(&ar_pci->ce_lock);
        return err;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
                                      struct ath10k_fw_crash_data *crash_data)
{
        __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        int i, ret;

        lockdep_assert_held(&ar->data_lock);

        ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
                                      hi_failure_state,
                                      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
        if (ret) {
                ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err(ar, "firmware register dump:\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           __le32_to_cpu(reg_dump_values[i]),
                           __le32_to_cpu(reg_dump_values[i + 1]),
                           __le32_to_cpu(reg_dump_values[i + 2]),
                           __le32_to_cpu(reg_dump_values[i + 3]));

        if (!crash_data)
                return;

        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
                crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
        struct ath10k_fw_crash_data *crash_data;
        char uuid[50];

        spin_lock_bh(&ar->data_lock);

        crash_data = ath10k_debug_get_new_fw_crash_data(ar);

        if (crash_data)
                scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
        else
                scnprintf(uuid, sizeof(uuid), "n/a");

        ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
        ath10k_print_driver_info(ar);
        ath10k_pci_dump_registers(ar, crash_data);

        spin_unlock_bh(&ar->data_lock);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}
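
/*
 * Example of the 50% heuristic above: CE3 (host->target WMI) has 32 src
 * entries, so the CE register read is skipped while more than 16 of
 * them are still free.
 */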

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        del_timer_sync(&ar_pci->rx_post_retry);
}

static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        const struct service_to_pipe *entry;
        bool ul_set = false, dl_set = false;
        int i;

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
                entry = &target_service_to_ce_map_wlan[i];

                if (__le32_to_cpu(entry->service_id) != service_id)
                        continue;

                switch (__le32_to_cpu(entry->pipedir)) {
                case PIPEDIR_NONE:
                        break;
                case PIPEDIR_IN:
                        WARN_ON(dl_set);
                        *dl_pipe = __le32_to_cpu(entry->pipenum);
                        dl_set = true;
                        break;
                case PIPEDIR_OUT:
                        WARN_ON(ul_set);
                        *ul_pipe = __le32_to_cpu(entry->pipenum);
                        ul_set = true;
                        break;
                case PIPEDIR_INOUT:
                        WARN_ON(dl_set);
                        WARN_ON(ul_set);
                        *dl_pipe = __le32_to_cpu(entry->pipenum);
                        *ul_pipe = __le32_to_cpu(entry->pipenum);
                        dl_set = true;
                        ul_set = true;
                        break;
                }
        }

        if (WARN_ON(!ul_set || !dl_set))
                return -ENOENT;

        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return 0;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_disable_and_clear_legacy_irq(ar);
        /* FIXME: How to mask all MSI interrupts? */

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                synchronize_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
        ath10k_ce_enable_interrupts(ar);
        ath10k_pci_enable_legacy_irq(ar);
        /* FIXME: How to unmask all MSI interrupts? */
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

        ath10k_pci_irq_enable(ar);
        ath10k_pci_rx_post(ar);

        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);
        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);
        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!netbuf)
                        continue;

                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        int i;

        for (i = 0; i < CE_COUNT; i++)
                ath10k_ce_deinit_pipe(ar, i);
}

static void ath10k_pci_flush(struct ath10k *ar)
{
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

        /* Most likely the device has HTT Rx ring configured. The only way to
         * prevent the device from accessing (and possibly corrupting) host
         * memory is to reset the chip now.
         *
         * There's also no known way of masking MSI interrupts on the device.
         * For ranged MSI the CE-related interrupts can be masked. However,
         * regardless of how many MSI interrupts are assigned the first one
         * is always used for firmware indications (crashes) and cannot be
         * masked. To prevent the device from asserting the interrupt, reset
         * it before proceeding with cleanup.
         */
        ath10k_pci_warm_reset(ar);

        ath10k_pci_irq_disable(ar);
        ath10k_pci_flush(ar);
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        might_sleep();

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
        }

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
        if (ret) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* the transfer completed before the timeout */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id))
                return;

        xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id, &flags))
                return;

        if (!xfer->wait_for_resp) {
                ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
                return;
        }

        xfer->resp_len = nbytes;
        xfer->rx_done = true;
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer)
{
        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

        while (time_before_eq(jiffies, timeout)) {
                ath10k_pci_bmi_send_done(tx_pipe);
                ath10k_pci_bmi_recv_data(rx_pipe);

                if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
                        return 0;

                schedule();
        }

        return -ETIMEDOUT;
}

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
        u32 addr, val;

        addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
        val = ath10k_pci_read32(ar, addr);
        val |= CORE_CTRL_CPU_INTR_MASK;
        ath10k_pci_write32(ar, addr, val);

        return 0;
}

static int ath10k_pci_init_config(struct ath10k *ar)
{
        u32 interconnect_targ_addr;
        u32 pcie_state_targ_addr = 0;
        u32 pipe_cfg_targ_addr = 0;
        u32 svc_to_pipe_map = 0;
        u32 pcie_config_flags = 0;
        u32 ealloc_value;
        u32 ealloc_targ_addr;
        u32 flag2_value;
        u32 flag2_targ_addr;
        int ret = 0;

        /* Download to Target the CE Config and the service-to-CE map */
        interconnect_targ_addr =
                host_interest_item_address(HI_ITEM(hi_interconnect_state));

        /* Supply Target-side CE configuration */
        ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
                                     &pcie_state_targ_addr);
        if (ret != 0) {
                ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
                return ret;
        }

        if (pcie_state_targ_addr == 0) {
                ret = -EIO;
                ath10k_err(ar, "Invalid pcie state addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   pipe_cfg_addr)),
                                     &pipe_cfg_targ_addr);
        if (ret != 0) {
                ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
                return ret;
        }

        if (pipe_cfg_targ_addr == 0) {
                ret = -EIO;
                ath10k_err(ar, "Invalid pipe cfg addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                        target_ce_config_wlan,
                                        sizeof(target_ce_config_wlan));

        if (ret != 0) {
                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   svc_to_pipe_map)),
                                     &svc_to_pipe_map);
        if (ret != 0) {
                ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
                return ret;
        }

        if (svc_to_pipe_map == 0) {
                ret = -EIO;
                ath10k_err(ar, "Invalid svc_to_pipe map\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
                                        target_service_to_ce_map_wlan,
                                        sizeof(target_service_to_ce_map_wlan));
        if (ret != 0) {
                ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   config_flags)),
                                     &pcie_config_flags);
        if (ret != 0) {
                ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
                return ret;
        }

        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

        ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
                                           offsetof(struct pcie_state,
                                                    config_flags)),
                                      pcie_config_flags);
        if (ret != 0) {
                ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
                return ret;
        }

        /* configure early allocation */
        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

        ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
        if (ret != 0) {
                ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
1537                return ret;
1538        }
1539
1540        /* first bank is switched to IRAM */
1541        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1542                         HI_EARLY_ALLOC_MAGIC_MASK);
1543        ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1544                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1545
1546        ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
1547        if (ret != 0) {
1548                ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
1549                return ret;
1550        }
1551
1552        /* Tell Target to proceed with initialization */
1553        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1554
1555        ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
1556        if (ret != 0) {
1557                ath10k_err(ar, "Failed to get option val: %d\n", ret);
1558                return ret;
1559        }
1560
1561        flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1562
1563        ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
1564        if (ret != 0) {
1565                ath10k_err(ar, "Failed to set option val: %d\n", ret);
1566                return ret;
1567        }
1568
1569        return 0;
1570}
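
/*
 * The three read-modify-write sequences above (config_flags, early alloc,
 * option flag2) all follow the same diag-window pattern: read a target
 * word, twiddle some bits, write it back. A minimal sketch of a helper
 * that could consolidate them; ath10k_pci_diag_rmw32() is a hypothetical
 * name, not part of this driver:
 */
static int ath10k_pci_diag_rmw32(struct ath10k *ar, u32 addr,
                                 u32 clear, u32 set)
{
        u32 val;
        int ret;

        ret = ath10k_pci_diag_read32(ar, addr, &val);
        if (ret != 0)
                return ret;

        val &= ~clear;
        val |= set;

        return ath10k_pci_diag_write32(ar, addr, val);
}

/*
 * With it, the hi_option_flag2 update above would reduce to:
 *
 *      return ath10k_pci_diag_rmw32(ar, flag2_targ_addr, 0,
 *                                   HI_OPTION_EARLY_CFG_DONE);
 */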
1571
1572static int ath10k_pci_alloc_ce(struct ath10k *ar)
1573{
1574        int i, ret;
1575
1576        for (i = 0; i < CE_COUNT; i++) {
1577                ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1578                if (ret) {
1579                        ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1580                                   i, ret);
1581                        return ret;
1582                }
1583        }
1584
1585        return 0;
1586}
1587
1588static void ath10k_pci_free_ce(struct ath10k *ar)
1589{
1590        int i;
1591
1592        for (i = 0; i < CE_COUNT; i++)
1593                ath10k_ce_free_pipe(ar, i);
1594}
1595
1596static int ath10k_pci_ce_init(struct ath10k *ar)
1597{
1598        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1599        struct ath10k_pci_pipe *pipe_info;
1600        const struct ce_attr *attr;
1601        int pipe_num, ret;
1602
1603        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1604                pipe_info = &ar_pci->pipe_info[pipe_num];
1605                pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
1606                pipe_info->pipe_num = pipe_num;
1607                pipe_info->hif_ce_state = ar;
1608                attr = &host_ce_config_wlan[pipe_num];
1609
1610                ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
1611                                          ath10k_pci_ce_send_done,
1612                                          ath10k_pci_ce_recv_data);
1613                if (ret) {
1614                        ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
1615                                   pipe_num, ret);
1616                        return ret;
1617                }
1618
1619                if (pipe_num == CE_COUNT - 1) {
                        /*
                         * Reserve the last CE for diagnostic
                         * window support.
                         */
1624                        ar_pci->ce_diag = pipe_info->ce_hdl;
1625                        continue;
1626                }
1627
1628                pipe_info->buf_sz = (size_t)(attr->src_sz_max);
1629        }
1630
1631        return 0;
1632}
1633
1634static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
1635{
1636        return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
1637               FW_IND_EVENT_PENDING;
1638}
1639
1640static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
1641{
1642        u32 val;
1643
1644        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1645        val &= ~FW_IND_EVENT_PENDING;
1646        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
1647}
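
/*
 * The MSI firmware-error tasklet and the legacy interrupt tasklet below
 * both pair the two helpers above the same way: detect the crash, clear
 * the indicator, then dump. A minimal sketch of that shared sequence;
 * ath10k_pci_fw_crash_check() is a hypothetical name, not driver API:
 */
static bool ath10k_pci_fw_crash_check(struct ath10k *ar)
{
        if (!ath10k_pci_has_fw_crashed(ar))
                return false;

        ath10k_pci_fw_crashed_clear(ar);
        ath10k_pci_fw_crashed_dump(ar);
        return true;
}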
1648
/* this function effectively clears the target memory controller assert line */
1650static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1651{
1652        u32 val;
1653
1654        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1655        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1656                               val | SOC_RESET_CONTROL_SI0_RST_MASK);
1657        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1658
1659        msleep(10);
1660
1661        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1662        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1663                               val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1664        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1665
1666        msleep(10);
1667}
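
/*
 * The SI0 sequence above is an instance of a generic "pulse a reset bit"
 * pattern: set the mask, read the register back (which presumably flushes
 * the posted write across PCIe), wait, then clear the mask and flush
 * again. A minimal sketch of that pattern as a standalone helper with a
 * hypothetical name:
 */
static void ath10k_pci_soc_pulse_reset(struct ath10k *ar, u32 reg, u32 mask)
{
        u32 val;

        val = ath10k_pci_soc_read32(ar, reg);
        ath10k_pci_soc_write32(ar, reg, val | mask);
        (void)ath10k_pci_soc_read32(ar, reg); /* flush the posted write */
        msleep(10);

        val = ath10k_pci_soc_read32(ar, reg);
        ath10k_pci_soc_write32(ar, reg, val & ~mask);
        (void)ath10k_pci_soc_read32(ar, reg); /* flush the posted write */
        msleep(10);
}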
1668
1669static int ath10k_pci_warm_reset(struct ath10k *ar)
1670{
1671        u32 val;
1672
1673        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
1674
1675        /* debug */
1676        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1677                                PCIE_INTR_CAUSE_ADDRESS);
1678        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
1679                   val);
1680
1681        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1682                                CPU_INTR_ADDRESS);
1683        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1684                   val);
1685
1686        /* disable pending irqs */
1687        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1688                           PCIE_INTR_ENABLE_ADDRESS, 0);
1689
1690        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1691                           PCIE_INTR_CLR_ADDRESS, ~0);
1692
1693        msleep(100);
1694
1695        /* clear fw indicator */
1696        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1697
1698        /* clear target LF timer interrupts */
1699        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1700                                SOC_LF_TIMER_CONTROL0_ADDRESS);
1701        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1702                           SOC_LF_TIMER_CONTROL0_ADDRESS,
1703                           val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1704
1705        /* reset CE */
1706        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1707                                SOC_RESET_CONTROL_ADDRESS);
1708        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1709                           val | SOC_RESET_CONTROL_CE_RST_MASK);
1710        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1711                                SOC_RESET_CONTROL_ADDRESS);
1712        msleep(10);
1713
1714        /* unreset CE */
1715        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1716                           val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1717        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1718                                SOC_RESET_CONTROL_ADDRESS);
1719        msleep(10);
1720
1721        ath10k_pci_warm_reset_si0(ar);
1722
1723        /* debug */
1724        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1725                                PCIE_INTR_CAUSE_ADDRESS);
1726        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
1727                   val);
1728
1729        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1730                                CPU_INTR_ADDRESS);
1731        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1732                   val);
1733
1734        /* CPU warm reset */
1735        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1736                                SOC_RESET_CONTROL_ADDRESS);
1737        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1738                           val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1739
1740        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1741                                SOC_RESET_CONTROL_ADDRESS);
1742        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
1743                   val);
1744
1745        msleep(100);
1746
1747        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
1748
1749        return 0;
1750}
1751
1752static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1753{
1754        int ret;
1755
1756        /*
1757         * Bring the target up cleanly.
1758         *
1759         * The target may be in an undefined state with an AUX-powered Target
1760         * and a Host in WoW mode. If the Host crashes, loses power, or is
1761         * restarted (without unloading the driver) then the Target is left
1762         * (aux) powered and running. On a subsequent driver load, the Target
1763         * is in an unexpected state. We try to catch that here in order to
1764         * reset the Target and retry the probe.
1765         */
1766        if (cold_reset)
1767                ret = ath10k_pci_cold_reset(ar);
1768        else
1769                ret = ath10k_pci_warm_reset(ar);
1770
1771        if (ret) {
1772                ath10k_err(ar, "failed to reset target: %d\n", ret);
1773                goto err;
1774        }
1775
1776        ret = ath10k_pci_ce_init(ar);
1777        if (ret) {
1778                ath10k_err(ar, "failed to initialize CE: %d\n", ret);
1779                goto err;
1780        }
1781
1782        ret = ath10k_pci_wait_for_target_init(ar);
1783        if (ret) {
1784                ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
1785                goto err_ce;
1786        }
1787
1788        ret = ath10k_pci_init_config(ar);
1789        if (ret) {
1790                ath10k_err(ar, "failed to setup init config: %d\n", ret);
1791                goto err_ce;
1792        }
1793
1794        ret = ath10k_pci_wake_target_cpu(ar);
1795        if (ret) {
1796                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
1797                goto err_ce;
1798        }
1799
1800        return 0;
1801
1802err_ce:
1803        ath10k_pci_ce_deinit(ar);
1804        ath10k_pci_warm_reset(ar);
1805err:
1806        return ret;
1807}
1808
1809static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
1810{
1811        int i, ret;
1812
        /*
         * Sometimes warm reset succeeds only after retries.
         *
         * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
         * on the first try.
         */
1819        for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
1820                ret = __ath10k_pci_hif_power_up(ar, false);
1821                if (ret == 0)
1822                        break;
1823
1824                ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
1825                            i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
1826        }
1827
1828        return ret;
1829}
1830
1831static int ath10k_pci_hif_power_up(struct ath10k *ar)
1832{
1833        int ret;
1834
1835        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
1836
1837        /*
1838         * Hardware CUS232 version 2 has some issues with cold reset and the
1839         * preferred (and safer) way to perform a device reset is through a
1840         * warm reset.
1841         *
         * Warm reset doesn't always work though, so falling back to cold
         * reset may be necessary.
1844         */
1845        ret = ath10k_pci_hif_power_up_warm(ar);
1846        if (ret) {
1847                ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
1848                            ret);
1849
1850                if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
1851                        return ret;
1852
1853                ath10k_warn(ar, "trying cold reset\n");
1854
1855                ret = __ath10k_pci_hif_power_up(ar, true);
1856                if (ret) {
1857                        ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
1858                                   ret);
1859                        return ret;
1860                }
1861        }
1862
1863        return 0;
1864}
1865
1866static void ath10k_pci_hif_power_down(struct ath10k *ar)
1867{
1868        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
1869
1870        ath10k_pci_warm_reset(ar);
1871}
1872
1873#ifdef CONFIG_PM
1874
1875#define ATH10K_PCI_PM_CONTROL 0x44
1876
1877static int ath10k_pci_hif_suspend(struct ath10k *ar)
1878{
1879        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1880        struct pci_dev *pdev = ar_pci->pdev;
1881        u32 val;
1882
1883        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1884
1885        if ((val & 0x000000ff) != 0x3) {
1886                pci_save_state(pdev);
1887                pci_disable_device(pdev);
1888                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1889                                       (val & 0xffffff00) | 0x03);
1890        }
1891
1892        return 0;
1893}
1894
1895static int ath10k_pci_hif_resume(struct ath10k *ar)
1896{
1897        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1898        struct pci_dev *pdev = ar_pci->pdev;
1899        u32 val;
1900
1901        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1902
1903        if ((val & 0x000000ff) != 0) {
1904                pci_restore_state(pdev);
1905                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1906                                       val & 0xffffff00);
1907                /*
1908                 * Suspend/Resume resets the PCI configuration space,
1909                 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1910                 * to keep PCI Tx retries from interfering with C3 CPU state
1911                 */
1912                pci_read_config_dword(pdev, 0x40, &val);
1913
1914                if ((val & 0x0000ff00) != 0)
1915                        pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1916        }
1917
1918        return 0;
1919}
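
/*
 * The pair above drives the power state by hand: 0x3 in the low byte of
 * the register at ATH10K_PCI_PM_CONTROL looks like the standard PMCSR
 * PowerState encoding for D3hot, and 0x0 for D0. A minimal sketch of the
 * suspend side using the PCI core helper instead, assuming no device
 * quirk forces the raw config writes (hypothetical, untested):
 */
static int ath10k_pci_hif_suspend_sketch(struct ath10k *ar)
{
        struct pci_dev *pdev = ath10k_pci_priv(ar)->pdev;

        pci_save_state(pdev);
        pci_disable_device(pdev);

        return pci_set_power_state(pdev, PCI_D3hot);
}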
1920#endif
1921
1922static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1923        .tx_sg                  = ath10k_pci_hif_tx_sg,
1924        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1925        .start                  = ath10k_pci_hif_start,
1926        .stop                   = ath10k_pci_hif_stop,
1927        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1928        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1929        .send_complete_check    = ath10k_pci_hif_send_complete_check,
1930        .set_callbacks          = ath10k_pci_hif_set_callbacks,
1931        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1932        .power_up               = ath10k_pci_hif_power_up,
1933        .power_down             = ath10k_pci_hif_power_down,
1934#ifdef CONFIG_PM
1935        .suspend                = ath10k_pci_hif_suspend,
1936        .resume                 = ath10k_pci_hif_resume,
1937#endif
1938};
1939
1940static void ath10k_pci_ce_tasklet(unsigned long ptr)
1941{
1942        struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
1943        struct ath10k_pci *ar_pci = pipe->ar_pci;
1944
1945        ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1946}
1947
1948static void ath10k_msi_err_tasklet(unsigned long data)
1949{
1950        struct ath10k *ar = (struct ath10k *)data;
1951
1952        if (!ath10k_pci_has_fw_crashed(ar)) {
1953                ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
1954                return;
1955        }
1956
1957        ath10k_pci_fw_crashed_clear(ar);
1958        ath10k_pci_fw_crashed_dump(ar);
1959}
1960
1961/*
1962 * Handler for a per-engine interrupt on a PARTICULAR CE.
1963 * This is used in cases where each CE has a private MSI interrupt.
1964 */
1965static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1966{
1967        struct ath10k *ar = arg;
1968        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1969        int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1970
1971        if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1972                ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
1973                            ce_id);
1974                return IRQ_HANDLED;
1975        }
1976
        /*
         * NOTE: We are able to derive ce_id from irq because we
         * use a one-to-one mapping for CEs 0..5.
         * CEs 6 & 7 do not use interrupts at all.
         *
         * This mapping must be kept in sync with the mapping
         * used by firmware.
         */
1985        tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1986        return IRQ_HANDLED;
1987}
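
/*
 * Worked example of the irq -> ce_id mapping above, assuming the usual
 * vector layout (MSI_ASSIGN_FW == 0, MSI_ASSIGN_CE_INITIAL == 1): with
 * pdev->irq == 40, irq 41 services CE 0, irq 42 services CE 1, and so
 * on, since ce_id = irq - pdev->irq - MSI_ASSIGN_CE_INITIAL.
 */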
1988
1989static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1990{
1991        struct ath10k *ar = arg;
1992        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1993
1994        tasklet_schedule(&ar_pci->msi_fw_err);
1995        return IRQ_HANDLED;
1996}
1997
1998/*
1999 * Top-level interrupt handler for all PCI interrupts from a Target.
2000 * When a block of MSI interrupts is allocated, this top-level handler
2001 * is not used; instead, we directly call the correct sub-handler.
2002 */
2003static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2004{
2005        struct ath10k *ar = arg;
2006        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2007
2008        if (ar_pci->num_msi_intrs == 0) {
2009                if (!ath10k_pci_irq_pending(ar))
2010                        return IRQ_NONE;
2011
2012                ath10k_pci_disable_and_clear_legacy_irq(ar);
2013        }
2014
2015        tasklet_schedule(&ar_pci->intr_tq);
2016
2017        return IRQ_HANDLED;
2018}
2019
2020static void ath10k_pci_tasklet(unsigned long data)
2021{
2022        struct ath10k *ar = (struct ath10k *)data;
2023        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2024
2025        if (ath10k_pci_has_fw_crashed(ar)) {
2026                ath10k_pci_fw_crashed_clear(ar);
2027                ath10k_pci_fw_crashed_dump(ar);
2028                return;
2029        }
2030
2031        ath10k_ce_per_engine_service_any(ar);
2032
2033        /* Re-enable legacy irq that was disabled in the irq handler */
2034        if (ar_pci->num_msi_intrs == 0)
2035                ath10k_pci_enable_legacy_irq(ar);
2036}
2037
2038static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2039{
2040        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2041        int ret, i;
2042
2043        ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2044                          ath10k_pci_msi_fw_handler,
2045                          IRQF_SHARED, "ath10k_pci", ar);
2046        if (ret) {
2047                ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
2048                            ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2049                return ret;
2050        }
2051
2052        for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2053                ret = request_irq(ar_pci->pdev->irq + i,
2054                                  ath10k_pci_per_engine_handler,
2055                                  IRQF_SHARED, "ath10k_pci", ar);
2056                if (ret) {
2057                        ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
2058                                    ar_pci->pdev->irq + i, ret);
2059
2060                        for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2061                                free_irq(ar_pci->pdev->irq + i, ar);
2062
2063                        free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2064                        return ret;
2065                }
2066        }
2067
2068        return 0;
2069}
2070
2071static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2072{
2073        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2074        int ret;
2075
2076        ret = request_irq(ar_pci->pdev->irq,
2077                          ath10k_pci_interrupt_handler,
2078                          IRQF_SHARED, "ath10k_pci", ar);
2079        if (ret) {
2080                ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2081                            ar_pci->pdev->irq, ret);
2082                return ret;
2083        }
2084
2085        return 0;
2086}
2087
2088static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2089{
2090        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2091        int ret;
2092
2093        ret = request_irq(ar_pci->pdev->irq,
2094                          ath10k_pci_interrupt_handler,
2095                          IRQF_SHARED, "ath10k_pci", ar);
2096        if (ret) {
2097                ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2098                            ar_pci->pdev->irq, ret);
2099                return ret;
2100        }
2101
2102        return 0;
2103}
2104
2105static int ath10k_pci_request_irq(struct ath10k *ar)
2106{
2107        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2108
2109        switch (ar_pci->num_msi_intrs) {
2110        case 0:
2111                return ath10k_pci_request_irq_legacy(ar);
2112        case 1:
2113                return ath10k_pci_request_irq_msi(ar);
2114        case MSI_NUM_REQUEST:
2115                return ath10k_pci_request_irq_msix(ar);
2116        }
2117
2118        ath10k_warn(ar, "unknown irq configuration upon request\n");
2119        return -EINVAL;
2120}
2121
2122static void ath10k_pci_free_irq(struct ath10k *ar)
2123{
2124        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2125        int i;
2126
        /* There's at least one interrupt regardless of whether it's legacy
         * INTR, MSI or MSI-X */
2129        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2130                free_irq(ar_pci->pdev->irq + i, ar);
2131}
2132
2133static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2134{
2135        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2136        int i;
2137
2138        tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2139        tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2140                     (unsigned long)ar);
2141
2142        for (i = 0; i < CE_COUNT; i++) {
2143                ar_pci->pipe_info[i].ar_pci = ar_pci;
2144                tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2145                             (unsigned long)&ar_pci->pipe_info[i]);
2146        }
2147}
2148
2149static int ath10k_pci_init_irq(struct ath10k *ar)
2150{
2151        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2152        int ret;
2153
2154        ath10k_pci_init_irq_tasklets(ar);
2155
2156        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2157                ath10k_info(ar, "limiting irq mode to: %d\n",
2158                            ath10k_pci_irq_mode);
2159
2160        /* Try MSI-X */
2161        if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
2162                ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2163                ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2164                                           ar_pci->num_msi_intrs);
2165                if (ret > 0)
2166                        return 0;
2167
2168                /* fall-through */
2169        }
2170
2171        /* Try MSI */
2172        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2173                ar_pci->num_msi_intrs = 1;
2174                ret = pci_enable_msi(ar_pci->pdev);
2175                if (ret == 0)
2176                        return 0;
2177
2178                /* fall-through */
2179        }
2180
        /* Try legacy irq
         *
         * A potential race occurs here: the CORE_BASE write depends on
         * the target correctly decoding the AXI address, but the host
         * won't know when the target has written its BAR to CORE_CTRL.
         * This write might get lost if the target has NOT written the
         * BAR yet. For now, work around the race by repeating the write
         * in the synchronization check below (see
         * ath10k_pci_wait_for_target_init()). */
2189        ar_pci->num_msi_intrs = 0;
2190
2191        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2192                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2193
2194        return 0;
2195}
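
/*
 * Note that the "MSI-X" path above actually requests a block of eight
 * plain MSI vectors via pci_enable_msi_range(). On kernels that provide
 * pci_alloc_irq_vectors() (v4.8+), the multi-MSI/MSI fallback could be
 * expressed in one call; a minimal sketch with a hypothetical name,
 * glossing over the driver's separate legacy bookkeeping:
 */
static int ath10k_pci_init_irq_vectors(struct ath10k_pci *ar_pci)
{
        int nvec;

        /* try up to eight MSI vectors, settle for fewer; on failure the
         * caller would fall back to legacy INTx
         */
        nvec = pci_alloc_irq_vectors(ar_pci->pdev, 1, MSI_NUM_REQUEST,
                                     PCI_IRQ_MSI);
        if (nvec < 0)
                return nvec;

        ar_pci->num_msi_intrs = nvec;
        return 0;
}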
2196
2197static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2198{
2199        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2200                           0);
2201}
2202
2203static int ath10k_pci_deinit_irq(struct ath10k *ar)
2204{
2205        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2206
2207        switch (ar_pci->num_msi_intrs) {
2208        case 0:
2209                ath10k_pci_deinit_irq_legacy(ar);
2210                return 0;
2211        case 1:
2212                /* fall-through */
2213        case MSI_NUM_REQUEST:
2214                pci_disable_msi(ar_pci->pdev);
2215                return 0;
2216        default:
2217                pci_disable_msi(ar_pci->pdev);
2218        }
2219
2220        ath10k_warn(ar, "unknown irq configuration upon deinit\n");
2221        return -EINVAL;
2222}
2223
2224static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2225{
2226        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2227        unsigned long timeout;
2228        u32 val;
2229
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2231
2232        timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2233
2234        do {
2235                val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2236
2237                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2238                           val);
2239
                /* the target should never return all-ones; it means the
                 * read itself failed, i.e. the device isn't decoding
                 * PCIe accesses (yet), so keep polling
                 */
                if (val == 0xffffffff)
                        continue;
2243
2244                /* the device has crashed so don't bother trying anymore */
2245                if (val & FW_IND_EVENT_PENDING)
2246                        break;
2247
2248                if (val & FW_IND_INITIALIZED)
2249                        break;
2250
2251                if (ar_pci->num_msi_intrs == 0)
2252                        /* Fix potential race by repeating CORE_BASE writes */
2253                        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2254                                           PCIE_INTR_ENABLE_ADDRESS,
2255                                           PCIE_INTR_FIRMWARE_MASK |
2256                                           PCIE_INTR_CE_MASK_ALL);
2257
2258                mdelay(10);
2259        } while (time_before(jiffies, timeout));
2260
2261        if (val == 0xffffffff) {
2262                ath10k_err(ar, "failed to read device register, device is gone\n");
2263                return -EIO;
2264        }
2265
2266        if (val & FW_IND_EVENT_PENDING) {
2267                ath10k_warn(ar, "device has crashed during init\n");
2268                ath10k_pci_fw_crashed_clear(ar);
2269                ath10k_pci_fw_crashed_dump(ar);
2270                return -ECOMM;
2271        }
2272
2273        if (!(val & FW_IND_INITIALIZED)) {
2274                ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2275                           val);
2276                return -ETIMEDOUT;
2277        }
2278
2279        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2280        return 0;
2281}
2282
2283static int ath10k_pci_cold_reset(struct ath10k *ar)
2284{
2285        int i;
2286        u32 val;
2287
2288        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
2289
2290        /* Put Target, including PCIe, into RESET. */
2291        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2292        val |= 1;
2293        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2294
2295        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2296                if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2297                                          RTC_STATE_COLD_RESET_MASK)
2298                        break;
2299                msleep(1);
2300        }
2301
2302        /* Pull Target, including PCIe, out of RESET. */
2303        val &= ~1;
2304        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2305
2306        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2307                if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2308                                            RTC_STATE_COLD_RESET_MASK))
2309                        break;
2310                msleep(1);
2311        }
2312
2313        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
2314
2315        return 0;
2316}
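
/*
 * Both wait loops above poll the same RTC_STATE bit with a 1 ms period,
 * and neither reports a timeout. A minimal sketch of a shared helper
 * (hypothetical name) that also surfaces the timeout:
 */
static int ath10k_pci_wait_cold_reset_state(struct ath10k *ar, bool asserted)
{
        int i;

        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                bool set = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
                           RTC_STATE_COLD_RESET_MASK;

                if (set == asserted)
                        return 0;

                msleep(1);
        }

        return -ETIMEDOUT;
}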
2317
2318static int ath10k_pci_claim(struct ath10k *ar)
2319{
2320        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2321        struct pci_dev *pdev = ar_pci->pdev;
2322        u32 lcr_val;
2323        int ret;
2324
2325        pci_set_drvdata(pdev, ar);
2326
2327        ret = pci_enable_device(pdev);
2328        if (ret) {
2329                ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2330                return ret;
2331        }
2332
2333        ret = pci_request_region(pdev, BAR_NUM, "ath");
2334        if (ret) {
2335                ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2336                           ret);
2337                goto err_device;
2338        }
2339
2340        /* Target expects 32 bit DMA. Enforce it. */
2341        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2342        if (ret) {
2343                ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
2344                goto err_region;
2345        }
2346
2347        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2348        if (ret) {
2349                ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2350                           ret);
2351                goto err_region;
2352        }
2353
2354        pci_set_master(pdev);
2355
2356        /* Workaround: Disable ASPM */
2357        pci_read_config_dword(pdev, 0x80, &lcr_val);
2358        pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2359
2360        /* Arrange for access to Target SoC registers. */
2361        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2362        if (!ar_pci->mem) {
2363                ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
2364                ret = -EIO;
2365                goto err_master;
2366        }
2367
2368        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2369        return 0;
2370
2371err_master:
2372        pci_clear_master(pdev);
2373
2374err_region:
2375        pci_release_region(pdev, BAR_NUM);
2376
2377err_device:
2378        pci_disable_device(pdev);
2379
2380        return ret;
2381}
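
/*
 * The two DMA-mask calls above can be collapsed into one core helper
 * (available since v3.13); an equivalent one-liner:
 *
 *      ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */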
2382
2383static void ath10k_pci_release(struct ath10k *ar)
2384{
2385        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2386        struct pci_dev *pdev = ar_pci->pdev;
2387
2388        pci_iounmap(pdev, ar_pci->mem);
2389        pci_release_region(pdev, BAR_NUM);
2390        pci_clear_master(pdev);
2391        pci_disable_device(pdev);
2392}
2393
2394static int ath10k_pci_probe(struct pci_dev *pdev,
2395                            const struct pci_device_id *pci_dev)
2396{
2397        int ret = 0;
2398        struct ath10k *ar;
2399        struct ath10k_pci *ar_pci;
2400        u32 chip_id;
2401
2402        ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
2403                                &ath10k_pci_hif_ops);
2404        if (!ar) {
2405                dev_err(&pdev->dev, "failed to allocate core\n");
2406                return -ENOMEM;
2407        }
2408
2409        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
2410
2411        ar_pci = ath10k_pci_priv(ar);
2412        ar_pci->pdev = pdev;
2413        ar_pci->dev = &pdev->dev;
2414        ar_pci->ar = ar;
2415
2416        spin_lock_init(&ar_pci->ce_lock);
2417        setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2418                    (unsigned long)ar);
2419
2420        ret = ath10k_pci_claim(ar);
2421        if (ret) {
2422                ath10k_err(ar, "failed to claim device: %d\n", ret);
2423                goto err_core_destroy;
2424        }
2425
2426        ret = ath10k_pci_wake(ar);
2427        if (ret) {
2428                ath10k_err(ar, "failed to wake up: %d\n", ret);
2429                goto err_release;
2430        }
2431
2432        chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
        if (chip_id == 0xffffffff) {
                ath10k_err(ar, "failed to get chip id\n");
                ret = -ENODEV;
                goto err_sleep;
        }
2437
2438        ret = ath10k_pci_alloc_ce(ar);
2439        if (ret) {
2440                ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2441                           ret);
2442                goto err_sleep;
2443        }
2444
2445        ath10k_pci_ce_deinit(ar);
2446
2447        ret = ath10k_ce_disable_interrupts(ar);
2448        if (ret) {
2449                ath10k_err(ar, "failed to disable copy engine interrupts: %d\n",
2450                           ret);
2451                goto err_free_ce;
2452        }
2453
2454        /* Workaround: There's no known way to mask all possible interrupts via
2455         * device CSR. The only way to make sure device doesn't assert
2456         * interrupts is to reset it. Interrupts are then disabled on host
2457         * after handlers are registered.
2458         */
2459        ath10k_pci_warm_reset(ar);
2460
2461        ret = ath10k_pci_init_irq(ar);
2462        if (ret) {
2463                ath10k_err(ar, "failed to init irqs: %d\n", ret);
2464                goto err_free_ce;
2465        }
2466
2467        ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
2468                    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
2469                    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
2470
2471        ret = ath10k_pci_request_irq(ar);
2472        if (ret) {
2473                ath10k_warn(ar, "failed to request irqs: %d\n", ret);
2474                goto err_deinit_irq;
2475        }
2476
2477        /* This shouldn't race as the device has been reset above. */
2478        ath10k_pci_irq_disable(ar);
2479
2480        ret = ath10k_core_register(ar, chip_id);
2481        if (ret) {
2482                ath10k_err(ar, "failed to register driver core: %d\n", ret);
2483                goto err_free_irq;
2484        }
2485
2486        return 0;
2487
2488err_free_irq:
2489        ath10k_pci_free_irq(ar);
2490        ath10k_pci_kill_tasklet(ar);
2491
2492err_deinit_irq:
2493        ath10k_pci_deinit_irq(ar);
2494
2495err_free_ce:
2496        ath10k_pci_free_ce(ar);
2497
2498err_sleep:
2499        ath10k_pci_sleep(ar);
2500
2501err_release:
2502        ath10k_pci_release(ar);
2503
2504err_core_destroy:
2505        ath10k_core_destroy(ar);
2506
2507        return ret;
2508}
2509
2510static void ath10k_pci_remove(struct pci_dev *pdev)
2511{
2512        struct ath10k *ar = pci_get_drvdata(pdev);
2513        struct ath10k_pci *ar_pci;
2514
2515        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
2516
2517        if (!ar)
2518                return;
2519
2520        ar_pci = ath10k_pci_priv(ar);
2521
2522        if (!ar_pci)
2523                return;
2524
2525        ath10k_core_unregister(ar);
2526        ath10k_pci_free_irq(ar);
2527        ath10k_pci_kill_tasklet(ar);
2528        ath10k_pci_deinit_irq(ar);
2529        ath10k_pci_ce_deinit(ar);
2530        ath10k_pci_free_ce(ar);
2531        ath10k_pci_sleep(ar);
2532        ath10k_pci_release(ar);
2533        ath10k_core_destroy(ar);
2534}
2535
2536MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2537
2538static struct pci_driver ath10k_pci_driver = {
2539        .name = "ath10k_pci",
2540        .id_table = ath10k_pci_id_table,
2541        .probe = ath10k_pci_probe,
2542        .remove = ath10k_pci_remove,
2543};
2544
2545static int __init ath10k_pci_init(void)
2546{
2547        int ret;
2548
2549        ret = pci_register_driver(&ath10k_pci_driver);
2550        if (ret)
                pr_err("failed to register ath10k pci driver: %d\n", ret);
2553
2554        return ret;
2555}
2556module_init(ath10k_pci_init);
2557
2558static void __exit ath10k_pci_exit(void)
2559{
2560        pci_unregister_driver(&ath10k_pci_driver);
2561}
2562
2563module_exit(ath10k_pci_exit);
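
/*
 * If the custom error message in ath10k_pci_init() were not wanted, the
 * init/exit boilerplate above could be replaced with a single line:
 *
 *      module_pci_driver(ath10k_pci_driver);
 */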
2564
2565MODULE_AUTHOR("Qualcomm Atheros");
2566MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2567MODULE_LICENSE("Dual BSD/GPL");
2568MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
2569MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2570