linux/drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_AUTO = 0,
        ATH10K_PCI_IRQ_LEGACY = 1,
        ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

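/*
 * Illustrative usage (an assumption about standard module-parameter
 * handling, not something this file defines): with permissions 0644 both
 * knobs can be set at module load time or inspected via sysfs, e.g.
 *
 *      modprobe ath10k_pci irq_mode=2 reset_mode=0
 *      cat /sys/module/ath10k_pci/parameters/irq_mode
 */
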
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define QCA988X_2_0_DEVICE_ID   (0x003c)
#define QCA6174_2_1_DEVICE_ID   (0x003e)

static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
        {0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
        /* QCA988X pre-2.0 chips are not supported because they require some
         * nasty hacks that ath10k does not implement; without them these
         * devices crash horribly.
         */
        { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = __cpu_to_le32(0),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = __cpu_to_le32(1),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(512),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE2: target->host WMI */
        {
                .pipenum = __cpu_to_le32(2),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE3: host->target WMI */
        {
                .pipenum = __cpu_to_le32(3),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE4: host->target HTT */
        {
                .pipenum = __cpu_to_le32(4),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(256),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = __cpu_to_le32(5),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = __cpu_to_le32(6),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(4096),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE7 used only by Host */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(4),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },

        /* (Additions here) */

        { /* must be last */
                __cpu_to_le32(0),
                __cpu_to_le32(0),
                __cpu_to_le32(0),
        },
};

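/*
 * Example of reading the table above (taken directly from its entries):
 * the WMI control service appears twice, mapping host->target traffic to
 * CE3 and target->host traffic to CE2. ath10k_pci_hif_map_service_to_pipe()
 * below walks these entries at runtime, so a caller such as
 *
 *      u8 ul_pipe, dl_pipe;
 *      int ul_polled, dl_polled;
 *
 *      ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *                                         &ul_pipe, &dl_pipe,
 *                                         &ul_polled, &dl_polled);
 *
 * would end up with ul_pipe == 3 and dl_pipe == 2.
 */
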
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: the INTR_CLR register has to be written after
         * INTR_ENABLE is set to 0; otherwise the interrupt cannot
         * actually be cleared. */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs > 1)
                return "msi-x";

        if (ar_pci->num_msi_intrs == 1)
                return "msi";

        return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret;

        lockdep_assert_held(&ar_pci->ce_lock);

        skb = dev_alloc_skb(pipe->buf_sz);
        if (!skb)
                return -ENOMEM;

        WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

        paddr = dma_map_single(ar->dev, skb->data,
                               skb->len + skb_tailroom(skb),
                               DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ar->dev, paddr))) {
                ath10k_warn(ar, "failed to dma map pci rx buf\n");
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        ATH10K_SKB_RXCB(skb)->paddr = paddr;

        ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
        if (ret) {
                ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        int ret, num;

        lockdep_assert_held(&ar_pci->ce_lock);

        if (pipe->buf_sz == 0)
                return;

        if (!ce_pipe->dest_ring)
                return;

        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
        while (num--) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
                        ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                        mod_timer(&ar_pci->rx_post_retry, jiffies +
                                  ATH10K_PCI_RX_POST_RETRY_MS);
                        break;
                }
        }
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        __ath10k_pci_rx_post_pipe(pipe);
        spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_post(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        spin_lock_bh(&ar_pci->ce_lock);
        for (i = 0; i < CE_COUNT; i++)
                __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
        spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
        struct ath10k *ar = (void *)ptr;

        ath10k_pci_rx_post(ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Callers must guarantee proper alignment, where applicable, and that there
 * is only a single user at any given moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        spin_lock_bh(&ar_pci->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);

                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
                                            0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32)address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0)
                memcpy(data, data_buf, orig_nbytes);
        else
                ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
                            address, ret);

        if (data_buf)
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
        __le32 val = 0;
        int ret;

        ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
        *value = __le32_to_cpu(val);

        return ret;
}

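/*
 * Illustrative sketch of a diagnostic read (hypothetical caller, not part
 * of this file): fetching a single 32-bit word from target memory goes
 * through the bounce-buffer machinery above.
 *
 *      u32 val;
 *      int ret;
 *
 *      ret = ath10k_pci_diag_read32(ar, address, &val);
 *      if (ret)
 *              ath10k_warn(ar, "diag read failed: %d\n", ret);
 */
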
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
                                     u32 src, u32 len)
{
        u32 host_addr, addr;
        int ret;

        host_addr = host_interest_item_address(src);

        ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
        if (ret != 0) {
                ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
                            src, ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
        if (ret != 0) {
                ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
                            addr, len, ret);
                return ret;
        }

        return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)             \
        __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        spin_lock_bh(&ar_pci->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        memcpy(data_buf, data, orig_nbytes);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
                                            nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);
        }

        if (ret != 0)
                ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
                            address, ret);

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
        __le32 val = __cpu_to_le32(value);

        return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
        u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);

        return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
        int tot_delay = 0;
        int curr_delay = 5;

        while (tot_delay < PCIE_WAKE_TIMEOUT) {
                if (ath10k_pci_is_awake(ar))
                        return 0;

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }

        return -ETIMEDOUT;
}

static int ath10k_pci_wake(struct ath10k *ar)
{
        ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_V_MASK);
        return ath10k_pci_wake_wait(ar);
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
        ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_RESET);
}

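/*
 * Illustrative pattern (an assumption about typical callers, not a
 * contract stated in this file): register traffic is bracketed by a wake
 * and a sleep so the SoC may power down in between.
 *
 *      ret = ath10k_pci_wake(ar);
 *      if (ret)
 *              return ret;
 *      ...register reads/writes...
 *      ath10k_pci_sleep(ar);
 */
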
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff_head list;
        struct sk_buff *skb;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
                                             &nbytes, &transfer_id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (skb == NULL)
                        continue;

                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list)))
                cb->tx_completion(ar, skb);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes, max_nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
                           ce_state->id, skb->len);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);

                cb->rx_completion(ar, skb);
        }

        ath10k_pci_rx_post_pipe(pipe_info);
}

static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                                struct ath10k_hif_sg_item *items, int n_items)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int err, i = 0;

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) < n_items)) {
                err = -ENOBUFS;
                goto err;
        }

        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ar, ATH10K_DBG_PCI,
                           "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                           i, items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                                items[i].vaddr, items[i].len);

                err = ath10k_ce_send_nolock(ce_pipe,
                                            items[i].transfer_context,
                                            items[i].paddr,
                                            items[i].len,
                                            items[i].transfer_id,
                                            CE_SEND_FLAG_GATHER);
                if (err)
                        goto err;
        }

        /* `i` is equal to `n_items - 1` after the for() loop */

        ath10k_dbg(ar, ATH10K_DBG_PCI,
                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                   i, items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                        items[i].vaddr, items[i].len);

        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    0);
        if (err)
                goto err;

        spin_unlock_bh(&ar_pci->ce_lock);
        return 0;

err:
        for (; i > 0; i--)
                __ath10k_ce_send_revert(ce_pipe);

        spin_unlock_bh(&ar_pci->ce_lock);
        return err;
}

static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
                                    size_t buf_len)
{
        return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
                                      struct ath10k_fw_crash_data *crash_data)
{
        __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        int i, ret;

        lockdep_assert_held(&ar->data_lock);

        ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
                                      hi_failure_state,
                                      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
        if (ret) {
                ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err(ar, "firmware register dump:\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           __le32_to_cpu(reg_dump_values[i]),
                           __le32_to_cpu(reg_dump_values[i + 1]),
                           __le32_to_cpu(reg_dump_values[i + 2]),
                           __le32_to_cpu(reg_dump_values[i + 3]));

        if (!crash_data)
                return;

        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
                crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
        struct ath10k_fw_crash_data *crash_data;
        char uuid[50];

        spin_lock_bh(&ar->data_lock);

        ar->stats.fw_crash_counter++;

        crash_data = ath10k_debug_get_new_fw_crash_data(ar);

        if (crash_data)
                scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
        else
                scnprintf(uuid, sizeof(uuid), "n/a");

        ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
        ath10k_print_driver_info(ar);
        ath10k_pci_dump_registers(ar, crash_data);

        spin_unlock_bh(&ar->data_lock);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

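/*
 * Worked example of the 50% heuristic above (the figure is assumed purely
 * for illustration): if host_ce_config_wlan[pipe].src_nentries were 256,
 * the relatively expensive CE register poll would be skipped for as long
 * as more than 128 source entries remain free.
 */
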
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        del_timer_sync(&ar_pci->rx_post_retry);
}

static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        const struct service_to_pipe *entry;
        bool ul_set = false, dl_set = false;
        int i;

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
                entry = &target_service_to_ce_map_wlan[i];

                if (__le32_to_cpu(entry->service_id) != service_id)
                        continue;

                switch (__le32_to_cpu(entry->pipedir)) {
                case PIPEDIR_NONE:
                        break;
                case PIPEDIR_IN:
                        WARN_ON(dl_set);
                        *dl_pipe = __le32_to_cpu(entry->pipenum);
                        dl_set = true;
                        break;
                case PIPEDIR_OUT:
                        WARN_ON(ul_set);
                        *ul_pipe = __le32_to_cpu(entry->pipenum);
                        ul_set = true;
                        break;
                case PIPEDIR_INOUT:
                        WARN_ON(dl_set);
                        WARN_ON(ul_set);
                        *dl_pipe = __le32_to_cpu(entry->pipenum);
                        *ul_pipe = __le32_to_cpu(entry->pipenum);
                        dl_set = true;
                        ul_set = true;
                        break;
                }
        }

        if (WARN_ON(!ul_set || !dl_set))
                return -ENOENT;

        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return 0;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
        u32 val;

        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
        val &= ~CORE_CTRL_PCIE_REG_31_MASK;

        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
        u32 val;

        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
        val |= CORE_CTRL_PCIE_REG_31_MASK;

        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_disable_and_clear_legacy_irq(ar);
        ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                synchronize_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
        ath10k_ce_enable_interrupts(ar);
        ath10k_pci_enable_legacy_irq(ar);
        ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

        ath10k_pci_irq_enable(ar);
        ath10k_pci_rx_post(ar);

        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
        struct ath10k *ar;
        struct ath10k_ce_pipe *ce_pipe;
        struct ath10k_ce_ring *ce_ring;
        struct sk_buff *skb;
        int i;

        ar = pci_pipe->hif_ce_state;
        ce_pipe = pci_pipe->ce_hdl;
        ce_ring = ce_pipe->dest_ring;

        if (!ce_ring)
                return;

        if (!pci_pipe->buf_sz)
                return;

        for (i = 0; i < ce_ring->nentries; i++) {
                skb = ce_ring->per_transfer_context[i];
                if (!skb)
                        continue;

                ce_ring->per_transfer_context[i] = NULL;

                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_pipe;
        struct ath10k_ce_ring *ce_ring;
        struct ce_desc *ce_desc;
        struct sk_buff *skb;
        unsigned int id;
        int i;

        ar = pci_pipe->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);
        ce_pipe = pci_pipe->ce_hdl;
        ce_ring = ce_pipe->src_ring;

        if (!ce_ring)
                return;

        if (!pci_pipe->buf_sz)
                return;

        ce_desc = ce_ring->shadow_base;
        if (WARN_ON(!ce_desc))
                return;

        for (i = 0; i < ce_ring->nentries; i++) {
                skb = ce_ring->per_transfer_context[i];
                if (!skb)
                        continue;

                ce_ring->per_transfer_context[i] = NULL;
                id = MS(__le16_to_cpu(ce_desc[i].flags),
                        CE_DESC_FLAGS_META_DATA);

                ar_pci->msg_callbacks_current.tx_completion(ar, skb);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        int i;

        for (i = 0; i < CE_COUNT; i++)
                ath10k_ce_deinit_pipe(ar, i);
}

static void ath10k_pci_flush(struct ath10k *ar)
{
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

        /* Most likely the device has an HTT Rx ring configured. The only way
         * to prevent the device from accessing (and possibly corrupting) host
         * memory is to reset the chip now.
         *
         * There's also no known way of masking MSI interrupts on the device.
         * For ranged MSI the CE-related interrupts can be masked. However,
         * regardless of how many MSI interrupts are assigned, the first one
         * is always used for firmware indications (crashes) and cannot be
         * masked. To prevent the device from asserting the interrupt, reset
         * it before proceeding with cleanup.
         */
        ath10k_pci_warm_reset(ar);

        ath10k_pci_irq_disable(ar);
        ath10k_pci_irq_sync(ar);
        ath10k_pci_flush(ar);
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        might_sleep();

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
        }

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
        if (ret) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* the transfer completed before the BMI timeout */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}

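/*
 * Sketch of a typical caller (an assumption: the BMI layer reaches this
 * function through the HIF ops, e.g. from bmi.c):
 *
 *      u32 resp_len = sizeof(resp);
 *
 *      ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, sizeof(cmd),
 *                                        &resp, &resp_len);
 *
 * On success *resp_len is clamped to the number of bytes the target
 * actually produced (see the min() above).
 */
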
1442static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1443{
1444        struct bmi_xfer *xfer;
1445        u32 ce_data;
1446        unsigned int nbytes;
1447        unsigned int transfer_id;
1448
1449        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1450                                          &nbytes, &transfer_id))
1451                return;
1452
1453        xfer->tx_done = true;
1454}
1455
1456static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1457{
1458        struct ath10k *ar = ce_state->ar;
1459        struct bmi_xfer *xfer;
1460        u32 ce_data;
1461        unsigned int nbytes;
1462        unsigned int transfer_id;
1463        unsigned int flags;
1464
1465        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1466                                          &nbytes, &transfer_id, &flags))
1467                return;
1468
1469        if (WARN_ON_ONCE(!xfer))
1470                return;
1471
1472        if (!xfer->wait_for_resp) {
1473                ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
1474                return;
1475        }
1476
1477        xfer->resp_len = nbytes;
1478        xfer->rx_done = true;
1479}
1480
1481static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1482                               struct ath10k_ce_pipe *rx_pipe,
1483                               struct bmi_xfer *xfer)
1484{
1485        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1486
1487        while (time_before_eq(jiffies, timeout)) {
1488                ath10k_pci_bmi_send_done(tx_pipe);
1489                ath10k_pci_bmi_recv_data(rx_pipe);
1490
1491                if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1492                        return 0;
1493
1494                schedule();
1495        }
1496
1497        return -ETIMEDOUT;
1498}
1499
1500/*
1501 * Send an interrupt to the device to wake up the Target CPU
1502 * so it has an opportunity to notice any changed state.
1503 */
1504static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1505{
1506        u32 addr, val;
1507
1508        addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1509        val = ath10k_pci_read32(ar, addr);
1510        val |= CORE_CTRL_CPU_INTR_MASK;
1511        ath10k_pci_write32(ar, addr, val);
1512
1513        return 0;
1514}
1515
1516static int ath10k_pci_get_num_banks(struct ath10k *ar)
1517{
1518        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1519
1520        switch (ar_pci->pdev->device) {
1521        case QCA988X_2_0_DEVICE_ID:
1522                return 1;
1523        case QCA6174_2_1_DEVICE_ID:
1524                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1525                case QCA6174_HW_1_0_CHIP_ID_REV:
1526                case QCA6174_HW_1_1_CHIP_ID_REV:
1527                        return 3;
1528                case QCA6174_HW_1_3_CHIP_ID_REV:
1529                        return 2;
1530                case QCA6174_HW_2_1_CHIP_ID_REV:
1531                case QCA6174_HW_2_2_CHIP_ID_REV:
1532                        return 6;
1533                case QCA6174_HW_3_0_CHIP_ID_REV:
1534                case QCA6174_HW_3_1_CHIP_ID_REV:
1535                case QCA6174_HW_3_2_CHIP_ID_REV:
1536                        return 9;
1537                }
1538                break;
1539        }
1540
1541        ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1542        return 1;
1543}
1544
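    /* Download the host's copy engine setup to the target: locate the
     * pcie_state struct via the host interest area, write the CE pipe
     * configuration and the service-to-pipe map, disable PCIe L1,
     * program the early IRAM allocation and finally tell the target that
     * early configuration is done.
     */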
1545static int ath10k_pci_init_config(struct ath10k *ar)
1546{
1547        u32 interconnect_targ_addr;
1548        u32 pcie_state_targ_addr = 0;
1549        u32 pipe_cfg_targ_addr = 0;
1550        u32 svc_to_pipe_map = 0;
1551        u32 pcie_config_flags = 0;
1552        u32 ealloc_value;
1553        u32 ealloc_targ_addr;
1554        u32 flag2_value;
1555        u32 flag2_targ_addr;
1556        int ret = 0;
1557
1558        /* Download to Target the CE Config and the service-to-CE map */
1559        interconnect_targ_addr =
1560                host_interest_item_address(HI_ITEM(hi_interconnect_state));
1561
1562        /* Supply Target-side CE configuration */
1563        ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
1564                                     &pcie_state_targ_addr);
1565        if (ret != 0) {
1566                ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
1567                return ret;
1568        }
1569
1570        if (pcie_state_targ_addr == 0) {
1571                ret = -EIO;
1572                ath10k_err(ar, "Invalid pcie state addr\n");
1573                return ret;
1574        }
1575
1576        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1577                                          offsetof(struct pcie_state,
1578                                                   pipe_cfg_addr)),
1579                                     &pipe_cfg_targ_addr);
1580        if (ret != 0) {
1581                ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
1582                return ret;
1583        }
1584
1585        if (pipe_cfg_targ_addr == 0) {
1586                ret = -EIO;
1587                ath10k_err(ar, "Invalid pipe cfg addr\n");
1588                return ret;
1589        }
1590
1591        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1592                                        target_ce_config_wlan,
1593                                        sizeof(target_ce_config_wlan));
1594
1595        if (ret != 0) {
1596                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
1597                return ret;
1598        }
1599
1600        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1601                                          offsetof(struct pcie_state,
1602                                                   svc_to_pipe_map)),
1603                                     &svc_to_pipe_map);
1604        if (ret != 0) {
1605                ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
1606                return ret;
1607        }
1608
1609        if (svc_to_pipe_map == 0) {
1610                ret = -EIO;
1611                ath10k_err(ar, "Invalid svc_to_pipe map\n");
1612                return ret;
1613        }
1614
1615        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1616                                        target_service_to_ce_map_wlan,
1617                                        sizeof(target_service_to_ce_map_wlan));
1618        if (ret != 0) {
1619                ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
1620                return ret;
1621        }
1622
1623        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1624                                          offsetof(struct pcie_state,
1625                                                   config_flags)),
1626                                     &pcie_config_flags);
1627        if (ret != 0) {
1628                ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
1629                return ret;
1630        }
1631
1632        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1633
1634        ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
1635                                           offsetof(struct pcie_state,
1636                                                    config_flags)),
1637                                      pcie_config_flags);
1638        if (ret != 0) {
1639                ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
1640                return ret;
1641        }
1642
1643        /* configure early allocation */
1644        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1645
1646        ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
1647        if (ret != 0) {
1648                ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
1649                return ret;
1650        }
1651
1652        /* first bank is switched to IRAM */
1653        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1654                         HI_EARLY_ALLOC_MAGIC_MASK);
1655        ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
1656                          HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1657                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1658
1659        ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
1660        if (ret != 0) {
1661                ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
1662                return ret;
1663        }
1664
1665        /* Tell Target to proceed with initialization */
1666        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1667
1668        ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
1669        if (ret != 0) {
1670                ath10k_err(ar, "Failed to get option val: %d\n", ret);
1671                return ret;
1672        }
1673
1674        flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1675
1676        ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
1677        if (ret != 0) {
1678                ath10k_err(ar, "Failed to set option val: %d\n", ret);
1679                return ret;
1680        }
1681
1682        return 0;
1683}
1684
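    /* Allocate one host pipe per copy engine. The last CE is reserved as
     * the diagnostic window and therefore gets no buffer size assigned.
     */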
1685static int ath10k_pci_alloc_pipes(struct ath10k *ar)
1686{
1687        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1688        struct ath10k_pci_pipe *pipe;
1689        int i, ret;
1690
1691        for (i = 0; i < CE_COUNT; i++) {
1692                pipe = &ar_pci->pipe_info[i];
1693                pipe->ce_hdl = &ar_pci->ce_states[i];
1694                pipe->pipe_num = i;
1695                pipe->hif_ce_state = ar;
1696
1697                ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
1698                                           ath10k_pci_ce_send_done,
1699                                           ath10k_pci_ce_recv_data);
1700                if (ret) {
1701                        ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1702                                   i, ret);
1703                        return ret;
1704                }
1705
1706                /* Last CE is Diagnostic Window */
1707                if (i == CE_COUNT - 1) {
1708                        ar_pci->ce_diag = pipe->ce_hdl;
1709                        continue;
1710                }
1711
1712                pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
1713        }
1714
1715        return 0;
1716}
1717
1718static void ath10k_pci_free_pipes(struct ath10k *ar)
1719{
1720        int i;
1721
1722        for (i = 0; i < CE_COUNT; i++)
1723                ath10k_ce_free_pipe(ar, i);
1724}
1725
1726static int ath10k_pci_init_pipes(struct ath10k *ar)
1727{
1728        int i, ret;
1729
1730        for (i = 0; i < CE_COUNT; i++) {
1731                ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
1732                if (ret) {
1733                        ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
1734                                   i, ret);
1735                        return ret;
1736                }
1737        }
1738
1739        return 0;
1740}
1741
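    /* The firmware sets FW_IND_EVENT_PENDING in the indicator register
     * when it has crashed; the helpers below test and acknowledge that
     * condition.
     */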
1742static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
1743{
1744        return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
1745               FW_IND_EVENT_PENDING;
1746}
1747
1748static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
1749{
1750        u32 val;
1751
1752        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1753        val &= ~FW_IND_EVENT_PENDING;
1754        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
1755}
1756
1757/* this function effectively clears target memory controller assert line */
1758static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1759{
1760        u32 val;
1761
1762        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1763        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1764                               val | SOC_RESET_CONTROL_SI0_RST_MASK);
1765        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1766
1767        msleep(10);
1768
1769        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1770        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1771                               val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1772        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1773
1774        msleep(10);
1775}
1776
1777static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
1778{
1779        u32 val;
1780
1781        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1782
1783        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1784                                SOC_RESET_CONTROL_ADDRESS);
1785        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1786                           val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1787}
1788
1789static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
1790{
1791        u32 val;
1792
1793        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1794                                SOC_RESET_CONTROL_ADDRESS);
1795
1796        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1797                           val | SOC_RESET_CONTROL_CE_RST_MASK);
1798        msleep(10);
1799        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1800                           val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1801}
1802
1803static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
1804{
1805        u32 val;
1806
1807        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1808                                SOC_LF_TIMER_CONTROL0_ADDRESS);
1809        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1810                           SOC_LF_TIMER_CONTROL0_ADDRESS,
1811                           val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1812}
1813
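    /* The full warm reset sequence below (SI0 reset, CPU reset, CE init,
     * LF timer clear, CE reset, a second CPU reset) looks redundant but
     * is apparently order-sensitive; see the inline comment about the
     * host hanging if the target touches a copy engine mid-reset.
     */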
1814static int ath10k_pci_warm_reset(struct ath10k *ar)
1815{
1816        int ret;
1817
1818        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
1819
1820        spin_lock_bh(&ar->data_lock);
1821        ar->stats.fw_warm_reset_counter++;
1822        spin_unlock_bh(&ar->data_lock);
1823
1824        ath10k_pci_irq_disable(ar);
1825
1826        /* Make sure the target CPU is not doing anything dangerous, e.g. if it
1827         * were to access copy engine while host performs copy engine reset
1828         * then it is possible for the device to confuse pci-e controller to
1829         * the point of bringing host system to a complete stop (i.e. hang).
1830         */
1831        ath10k_pci_warm_reset_si0(ar);
1832        ath10k_pci_warm_reset_cpu(ar);
1833        ath10k_pci_init_pipes(ar);
1834        ath10k_pci_wait_for_target_init(ar);
1835
1836        ath10k_pci_warm_reset_clear_lf(ar);
1837        ath10k_pci_warm_reset_ce(ar);
1838        ath10k_pci_warm_reset_cpu(ar);
1839        ath10k_pci_init_pipes(ar);
1840
1841        ret = ath10k_pci_wait_for_target_init(ar);
1842        if (ret) {
1843                ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
1844                return ret;
1845        }
1846
1847        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
1848
1849        return 0;
1850}
1851
1852static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
1853{
1854        int i, ret;
1855        u32 val;
1856
1857        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
1858
1859        /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
1860         * Warm reset is thus preferred as it is safer, but it may not be able
1861         * to recover the device from all possible failure scenarios.
1862         *
1863         * Warm reset doesn't always work on first try so attempt it a few
1864         * times before giving up.
1865         */
1866        for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
1867                ret = ath10k_pci_warm_reset(ar);
1868                if (ret) {
1869                        ath10k_warn(ar, "warm reset attempt %d of %d failed: %d\n",
1870                                    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
1871                                    ret);
1872                        continue;
1873                }
1874
1875                /* FIXME: Sometimes copy engine doesn't recover after warm
1876                 * reset. In most cases this needs cold reset. In some of these
1877                 * cases the device is in such a state that a cold reset may
1878                 * lock up the host.
1879                 *
1880                 * Reading any host interest register via copy engine is
1881                 * sufficient to verify if device is capable of booting
1882                 * firmware blob.
1883                 */
1884                ret = ath10k_pci_init_pipes(ar);
1885                if (ret) {
1886                        ath10k_warn(ar, "failed to init copy engine: %d\n",
1887                                    ret);
1888                        continue;
1889                }
1890
1891                ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
1892                                             &val);
1893                if (ret) {
1894                        ath10k_warn(ar, "failed to poke copy engine: %d\n",
1895                                    ret);
1896                        continue;
1897                }
1898
1899                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
1900                return 0;
1901        }
1902
1903        if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
1904                ath10k_warn(ar, "refusing cold reset as requested\n");
1905                return -EPERM;
1906        }
1907
1908        ret = ath10k_pci_cold_reset(ar);
1909        if (ret) {
1910                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
1911                return ret;
1912        }
1913
1914        ret = ath10k_pci_wait_for_target_init(ar);
1915        if (ret) {
1916                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
1917                            ret);
1918                return ret;
1919        }
1920
1921        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
1922
1923        return 0;
1924}
1925
1926static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
1927{
1928        int ret;
1929
1930        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
1931
1932        /* FIXME: QCA6174 requires cold + warm reset to work. */
1933
1934        ret = ath10k_pci_cold_reset(ar);
1935        if (ret) {
1936                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
1937                return ret;
1938        }
1939
1940        ret = ath10k_pci_wait_for_target_init(ar);
1941        if (ret) {
1942                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
1943                            ret);
1944                return ret;
1945        }
1946
1947        ret = ath10k_pci_warm_reset(ar);
1948        if (ret) {
1949                ath10k_warn(ar, "failed to warm reset: %d\n", ret);
1950                return ret;
1951        }
1952
1953        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold+warm)\n");
1954
1955        return 0;
1956}
1957
1958static int ath10k_pci_chip_reset(struct ath10k *ar)
1959{
1960        if (QCA_REV_988X(ar))
1961                return ath10k_pci_qca988x_chip_reset(ar);
1962        else if (QCA_REV_6174(ar))
1963                return ath10k_pci_qca6174_chip_reset(ar);
1964        else
1965                return -ENOTSUPP;
1966}
1967
1968static int ath10k_pci_hif_power_up(struct ath10k *ar)
1969{
1970        int ret;
1971
1972        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
1973
1974        ret = ath10k_pci_wake(ar);
1975        if (ret) {
1976                ath10k_err(ar, "failed to wake up target: %d\n", ret);
1977                return ret;
1978        }
1979
1980        /*
1981         * Bring the target up cleanly.
1982         *
1983         * The target may be in an undefined state with an AUX-powered Target
1984         * and a Host in WoW mode. If the Host crashes, loses power, or is
1985         * restarted (without unloading the driver) then the Target is left
1986         * (aux) powered and running. On a subsequent driver load, the Target
1987         * is in an unexpected state. We try to catch that here in order to
1988         * reset the Target and retry the probe.
1989         */
1990        ret = ath10k_pci_chip_reset(ar);
1991        if (ret) {
1992                if (ath10k_pci_has_fw_crashed(ar)) {
1993                        ath10k_warn(ar, "firmware crashed during chip reset\n");
1994                        ath10k_pci_fw_crashed_clear(ar);
1995                        ath10k_pci_fw_crashed_dump(ar);
1996                }
1997
1998                ath10k_err(ar, "failed to reset chip: %d\n", ret);
1999                goto err_sleep;
2000        }
2001
2002        ret = ath10k_pci_init_pipes(ar);
2003        if (ret) {
2004                ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2005                goto err_sleep;
2006        }
2007
2008        ret = ath10k_pci_init_config(ar);
2009        if (ret) {
2010                ath10k_err(ar, "failed to set up init config: %d\n", ret);
2011                goto err_ce;
2012        }
2013
2014        ret = ath10k_pci_wake_target_cpu(ar);
2015        if (ret) {
2016                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2017                goto err_ce;
2018        }
2019
2020        return 0;
2021
2022err_ce:
2023        ath10k_pci_ce_deinit(ar);
2024
2025err_sleep:
2026        ath10k_pci_sleep(ar);
2027        return ret;
2028}
2029
2030static void ath10k_pci_hif_power_down(struct ath10k *ar)
2031{
2032        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2033
2034        /* Currently hif_power_up performs effectively a reset and hif_stop
2035         * resets the chip as well so there's no point in resetting here.
2036         */
2037
2038        ath10k_pci_sleep(ar);
2039}
2040
2041#ifdef CONFIG_PM
2042
2043#define ATH10K_PCI_PM_CONTROL 0x44
2044
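    /* Put the device into D3hot by writing the power state field of its
     * PCI PM control register directly (0x3 == D3hot), saving config
     * space first. ATH10K_PCI_PM_CONTROL is assumed here to be the
     * offset of that register for this device.
     */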
2045static int ath10k_pci_hif_suspend(struct ath10k *ar)
2046{
2047        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2048        struct pci_dev *pdev = ar_pci->pdev;
2049        u32 val;
2050
2051        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2052
2053        if ((val & 0x000000ff) != 0x3) {
2054                pci_save_state(pdev);
2055                pci_disable_device(pdev);
2056                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2057                                       (val & 0xffffff00) | 0x03);
2058        }
2059
2060        return 0;
2061}
2062
2063static int ath10k_pci_hif_resume(struct ath10k *ar)
2064{
2065        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2066        struct pci_dev *pdev = ar_pci->pdev;
2067        u32 val;
2068
2069        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2070
2071        if ((val & 0x000000ff) != 0) {
2072                pci_restore_state(pdev);
2073                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2074                                       val & 0xffffff00);
2075                /*
2076                 * Suspend/Resume resets the PCI configuration space,
2077                 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2078                 * to keep PCI Tx retries from interfering with C3 CPU state
2079                 */
2080                pci_read_config_dword(pdev, 0x40, &val);
2081
2082                if ((val & 0x0000ff00) != 0)
2083                        pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2084        }
2085
2086        return 0;
2087}
2088#endif
2089
2090static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2091        .tx_sg                  = ath10k_pci_hif_tx_sg,
2092        .diag_read              = ath10k_pci_hif_diag_read,
2093        .diag_write             = ath10k_pci_diag_write_mem,
2094        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
2095        .start                  = ath10k_pci_hif_start,
2096        .stop                   = ath10k_pci_hif_stop,
2097        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
2098        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
2099        .send_complete_check    = ath10k_pci_hif_send_complete_check,
2100        .set_callbacks          = ath10k_pci_hif_set_callbacks,
2101        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
2102        .power_up               = ath10k_pci_hif_power_up,
2103        .power_down             = ath10k_pci_hif_power_down,
2104        .read32                 = ath10k_pci_read32,
2105        .write32                = ath10k_pci_write32,
2106#ifdef CONFIG_PM
2107        .suspend                = ath10k_pci_hif_suspend,
2108        .resume                 = ath10k_pci_hif_resume,
2109#endif
2110};
2111
2112static void ath10k_pci_ce_tasklet(unsigned long ptr)
2113{
2114        struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2115        struct ath10k_pci *ar_pci = pipe->ar_pci;
2116
2117        ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2118}
2119
2120static void ath10k_msi_err_tasklet(unsigned long data)
2121{
2122        struct ath10k *ar = (struct ath10k *)data;
2123
2124        if (!ath10k_pci_has_fw_crashed(ar)) {
2125                ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
2126                return;
2127        }
2128
2129        ath10k_pci_irq_disable(ar);
2130        ath10k_pci_fw_crashed_clear(ar);
2131        ath10k_pci_fw_crashed_dump(ar);
2132}
2133
2134/*
2135 * Handler for a per-engine interrupt on a PARTICULAR CE.
2136 * This is used in cases where each CE has a private MSI interrupt.
2137 */
2138static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2139{
2140        struct ath10k *ar = arg;
2141        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2142        int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2143
2144        if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2145                ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
2146                            ce_id);
2147                return IRQ_HANDLED;
2148        }
2149
2150        /*
2151         * NOTE: We are able to derive ce_id from irq because we
2152         * use a one-to-one mapping for CE's 0..5.
2153         * CE's 6 & 7 do not use interrupts at all.
2154         *
2155         * This mapping must be kept in sync with the mapping
2156         * used by firmware.
2157         */
2158        tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2159        return IRQ_HANDLED;
2160}
2161
2162static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2163{
2164        struct ath10k *ar = arg;
2165        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2166
2167        tasklet_schedule(&ar_pci->msi_fw_err);
2168        return IRQ_HANDLED;
2169}
2170
2171/*
2172 * Top-level interrupt handler for all PCI interrupts from a Target.
2173 * When a block of MSI interrupts is allocated, this top-level handler
2174 * is not used; instead, we directly call the correct sub-handler.
2175 */
2176static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2177{
2178        struct ath10k *ar = arg;
2179        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2180
2181        if (ar_pci->num_msi_intrs == 0) {
2182                if (!ath10k_pci_irq_pending(ar))
2183                        return IRQ_NONE;
2184
2185                ath10k_pci_disable_and_clear_legacy_irq(ar);
2186        }
2187
2188        tasklet_schedule(&ar_pci->intr_tq);
2189
2190        return IRQ_HANDLED;
2191}
2192
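    /* Bottom half for the shared interrupt: handle a firmware crash if
     * one is pending, otherwise service whichever copy engines have
     * completed work and, in legacy interrupt mode, re-enable the irq
     * that the top half masked.
     */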
2193static void ath10k_pci_tasklet(unsigned long data)
2194{
2195        struct ath10k *ar = (struct ath10k *)data;
2196        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2197
2198        if (ath10k_pci_has_fw_crashed(ar)) {
2199                ath10k_pci_irq_disable(ar);
2200                ath10k_pci_fw_crashed_clear(ar);
2201                ath10k_pci_fw_crashed_dump(ar);
2202                return;
2203        }
2204
2205        ath10k_ce_per_engine_service_any(ar);
2206
2207        /* Re-enable legacy irq that was disabled in the irq handler */
2208        if (ar_pci->num_msi_intrs == 0)
2209                ath10k_pci_enable_legacy_irq(ar);
2210}
2211
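    /* With a full MSI block the vectors are laid out as: MSI_ASSIGN_FW
     * for firmware indications, then one vector per copy engine from
     * MSI_ASSIGN_CE_INITIAL through MSI_ASSIGN_CE_MAX.
     */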
2212static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2213{
2214        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2215        int ret, i;
2216
2217        ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2218                          ath10k_pci_msi_fw_handler,
2219                          IRQF_SHARED, "ath10k_pci", ar);
2220        if (ret) {
2221                ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
2222                            ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2223                return ret;
2224        }
2225
2226        for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2227                ret = request_irq(ar_pci->pdev->irq + i,
2228                                  ath10k_pci_per_engine_handler,
2229                                  IRQF_SHARED, "ath10k_pci", ar);
2230                if (ret) {
2231                        ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
2232                                    ar_pci->pdev->irq + i, ret);
2233
2234                        for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2235                                free_irq(ar_pci->pdev->irq + i, ar);
2236
2237                        free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2238                        return ret;
2239                }
2240        }
2241
2242        return 0;
2243}
2244
2245static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2246{
2247        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2248        int ret;
2249
2250        ret = request_irq(ar_pci->pdev->irq,
2251                          ath10k_pci_interrupt_handler,
2252                          IRQF_SHARED, "ath10k_pci", ar);
2253        if (ret) {
2254                ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2255                            ar_pci->pdev->irq, ret);
2256                return ret;
2257        }
2258
2259        return 0;
2260}
2261
2262static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2263{
2264        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2265        int ret;
2266
2267        ret = request_irq(ar_pci->pdev->irq,
2268                          ath10k_pci_interrupt_handler,
2269                          IRQF_SHARED, "ath10k_pci", ar);
2270        if (ret) {
2271                ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2272                            ar_pci->pdev->irq, ret);
2273                return ret;
2274        }
2275
2276        return 0;
2277}
2278
2279static int ath10k_pci_request_irq(struct ath10k *ar)
2280{
2281        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2282
2283        switch (ar_pci->num_msi_intrs) {
2284        case 0:
2285                return ath10k_pci_request_irq_legacy(ar);
2286        case 1:
2287                return ath10k_pci_request_irq_msi(ar);
2288        case MSI_NUM_REQUEST:
2289                return ath10k_pci_request_irq_msix(ar);
2290        }
2291
2292        ath10k_warn(ar, "unknown irq configuration upon request\n");
2293        return -EINVAL;
2294}
2295
2296static void ath10k_pci_free_irq(struct ath10k *ar)
2297{
2298        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2299        int i;
2300
2301        /* There's at least one interrupt regardless of whether it's a legacy
2302         * INTR, MSI or MSI-X */
2303        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2304                free_irq(ar_pci->pdev->irq + i, ar);
2305}
2306
2307static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2308{
2309        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2310        int i;
2311
2312        tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2313        tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2314                     (unsigned long)ar);
2315
2316        for (i = 0; i < CE_COUNT; i++) {
2317                ar_pci->pipe_info[i].ar_pci = ar_pci;
2318                tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2319                             (unsigned long)&ar_pci->pipe_info[i]);
2320        }
2321}
2322
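    /* Pick an interrupt mode: first try a full block of MSI_NUM_REQUEST
     * MSI vectors, then a single MSI, then fall back to legacy INTx. The
     * irq_mode module parameter can force the MSI or legacy paths.
     */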
2323static int ath10k_pci_init_irq(struct ath10k *ar)
2324{
2325        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2326        int ret;
2327
2328        ath10k_pci_init_irq_tasklets(ar);
2329
2330        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2331                ath10k_info(ar, "limiting irq mode to: %d\n",
2332                            ath10k_pci_irq_mode);
2333
2334        /* Try MSI-X */
2335        if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
2336                ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2337                ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2338                                           ar_pci->num_msi_intrs);
2339                if (ret > 0)
2340                        return 0;
2341
2342                /* fall-through */
2343        }
2344
2345        /* Try MSI */
2346        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2347                ar_pci->num_msi_intrs = 1;
2348                ret = pci_enable_msi(ar_pci->pdev);
2349                if (ret == 0)
2350                        return 0;
2351
2352                /* fall-through */
2353        }
2354
2355        /* Try legacy irq
2356         *
2357         * A potential race exists here: the CORE_BASE write below depends on
2358         * the target correctly decoding the AXI address, but the host has no
2359         * way of knowing when the target has programmed its BAR. The write
2360         * may be lost if it arrives before the BAR is set up. For now, work
2361         * around the race by repeating the write during the synchronization
2362         * check in ath10k_pci_wait_for_target_init(). */
2363        ar_pci->num_msi_intrs = 0;
2364
2365        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2366                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2367
2368        return 0;
2369}
2370
2371static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2372{
2373        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2374                           0);
2375}
2376
2377static int ath10k_pci_deinit_irq(struct ath10k *ar)
2378{
2379        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2380
2381        switch (ar_pci->num_msi_intrs) {
2382        case 0:
2383                ath10k_pci_deinit_irq_legacy(ar);
2384                return 0;
2385        case 1:
2386                /* fall-through */
2387        case MSI_NUM_REQUEST:
2388                pci_disable_msi(ar_pci->pdev);
2389                return 0;
2390        default:
2391                pci_disable_msi(ar_pci->pdev);
2392        }
2393
2394        ath10k_warn(ar, "unknown irq configuration upon deinit\n");
2395        return -EINVAL;
2396}
2397
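    /* Poll the firmware indicator register for up to
     * ATH10K_PCI_TARGET_WAIT ms waiting for FW_IND_INITIALIZED. Reads of
     * 0xffffffff mean the device has dropped off the bus;
     * FW_IND_EVENT_PENDING means the firmware crashed while booting.
     */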
2398static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2399{
2400        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2401        unsigned long timeout;
2402        u32 val;
2403
2404        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2405
2406        timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2407
2408        do {
2409                val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2410
2411                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2412                           val);
2413
2414                /* target should never return this */
2415                if (val == 0xffffffff)
2416                        continue;
2417
2418                /* the device has crashed so don't bother trying anymore */
2419                if (val & FW_IND_EVENT_PENDING)
2420                        break;
2421
2422                if (val & FW_IND_INITIALIZED)
2423                        break;
2424
2425                if (ar_pci->num_msi_intrs == 0)
2426                        /* Fix potential race by repeating CORE_BASE writes */
2427                        ath10k_pci_enable_legacy_irq(ar);
2428
2429                mdelay(10);
2430        } while (time_before(jiffies, timeout));
2431
2432        ath10k_pci_disable_and_clear_legacy_irq(ar);
2433        ath10k_pci_irq_msi_fw_mask(ar);
2434
2435        if (val == 0xffffffff) {
2436                ath10k_err(ar, "failed to read device register, device is gone\n");
2437                return -EIO;
2438        }
2439
2440        if (val & FW_IND_EVENT_PENDING) {
2441                ath10k_warn(ar, "device has crashed during init\n");
2442                return -ECOMM;
2443        }
2444
2445        if (!(val & FW_IND_INITIALIZED)) {
2446                ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2447                           val);
2448                return -ETIMEDOUT;
2449        }
2450
2451        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2452        return 0;
2453}
2454
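    /* Full SoC reset: assert the global reset bit (which resets the PCIe
     * core as well), wait for RTC_STATE to report the cold reset, then
     * deassert and wait for the state to clear again.
     */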
2455static int ath10k_pci_cold_reset(struct ath10k *ar)
2456{
2457        int i;
2458        u32 val;
2459
2460        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
2461
2462        spin_lock_bh(&ar->data_lock);
2463
2464        ar->stats.fw_cold_reset_counter++;
2465
2466        spin_unlock_bh(&ar->data_lock);
2467
2468        /* Put Target, including PCIe, into RESET. */
2469        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2470        val |= 1;
2471        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2472
2473        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2474                if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2475                                          RTC_STATE_COLD_RESET_MASK)
2476                        break;
2477                msleep(1);
2478        }
2479
2480        /* Pull Target, including PCIe, out of RESET. */
2481        val &= ~1;
2482        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2483
2484        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2485                if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2486                                            RTC_STATE_COLD_RESET_MASK))
2487                        break;
2488                msleep(1);
2489        }
2490
2491        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
2492
2493        return 0;
2494}
2495
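    /* Enable the PCI device and map its registers: request BAR_NUM,
     * restrict DMA to the 32 bits the target expects, become bus master
     * and iomap the register space. The config-space write at offset
     * 0x80 clears what is presumably this device's ASPM control bits.
     */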
2496static int ath10k_pci_claim(struct ath10k *ar)
2497{
2498        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2499        struct pci_dev *pdev = ar_pci->pdev;
2500        u32 lcr_val;
2501        int ret;
2502
2503        pci_set_drvdata(pdev, ar);
2504
2505        ret = pci_enable_device(pdev);
2506        if (ret) {
2507                ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2508                return ret;
2509        }
2510
2511        ret = pci_request_region(pdev, BAR_NUM, "ath");
2512        if (ret) {
2513                ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2514                           ret);
2515                goto err_device;
2516        }
2517
2518        /* Target expects 32 bit DMA. Enforce it. */
2519        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2520        if (ret) {
2521                ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
2522                goto err_region;
2523        }
2524
2525        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2526        if (ret) {
2527                ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2528                           ret);
2529                goto err_region;
2530        }
2531
2532        pci_set_master(pdev);
2533
2534        /* Workaround: Disable ASPM */
2535        pci_read_config_dword(pdev, 0x80, &lcr_val);
2536        pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2537
2538        /* Arrange for access to Target SoC registers. */
2539        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2540        if (!ar_pci->mem) {
2541                ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
2542                ret = -EIO;
2543                goto err_master;
2544        }
2545
2546        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2547        return 0;
2548
2549err_master:
2550        pci_clear_master(pdev);
2551
2552err_region:
2553        pci_release_region(pdev, BAR_NUM);
2554
2555err_device:
2556        pci_disable_device(pdev);
2557
2558        return ret;
2559}
2560
2561static void ath10k_pci_release(struct ath10k *ar)
2562{
2563        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2564        struct pci_dev *pdev = ar_pci->pdev;
2565
2566        pci_iounmap(pdev, ar_pci->mem);
2567        pci_release_region(pdev, BAR_NUM);
2568        pci_clear_master(pdev);
2569        pci_disable_device(pdev);
2570}
2571
2572static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
2573{
2574        const struct ath10k_pci_supp_chip *supp_chip;
2575        int i;
2576        u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
2577
2578        for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
2579                supp_chip = &ath10k_pci_supp_chips[i];
2580
2581                if (supp_chip->dev_id == dev_id &&
2582                    supp_chip->rev_id == rev_id)
2583                        return true;
2584        }
2585
2586        return false;
2587}
2588
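    /* Probe: create the core instance, claim PCI resources, allocate
     * copy engine pipes, set up and request interrupts, reset the chip,
     * verify the chip id against the supported list and finally register
     * with the core.
     */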
2589static int ath10k_pci_probe(struct pci_dev *pdev,
2590                            const struct pci_device_id *pci_dev)
2591{
2592        int ret = 0;
2593        struct ath10k *ar;
2594        struct ath10k_pci *ar_pci;
2595        enum ath10k_hw_rev hw_rev;
2596        u32 chip_id;
2597
2598        switch (pci_dev->device) {
2599        case QCA988X_2_0_DEVICE_ID:
2600                hw_rev = ATH10K_HW_QCA988X;
2601                break;
2602        case QCA6174_2_1_DEVICE_ID:
2603                hw_rev = ATH10K_HW_QCA6174;
2604                break;
2605        default:
2606                WARN_ON(1);
2607                return -ENOTSUPP;
2608        }
2609
2610        ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
2611                                hw_rev, &ath10k_pci_hif_ops);
2612        if (!ar) {
2613                dev_err(&pdev->dev, "failed to allocate core\n");
2614                return -ENOMEM;
2615        }
2616
2617        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
2618
2619        ar_pci = ath10k_pci_priv(ar);
2620        ar_pci->pdev = pdev;
2621        ar_pci->dev = &pdev->dev;
2622        ar_pci->ar = ar;
2623
2624        spin_lock_init(&ar_pci->ce_lock);
2625        setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2626                    (unsigned long)ar);
2627
2628        ret = ath10k_pci_claim(ar);
2629        if (ret) {
2630                ath10k_err(ar, "failed to claim device: %d\n", ret);
2631                goto err_core_destroy;
2632        }
2633
2634        ret = ath10k_pci_wake(ar);
2635        if (ret) {
2636                ath10k_err(ar, "failed to wake up: %d\n", ret);
2637                goto err_release;
2638        }
2639
2640        ret = ath10k_pci_alloc_pipes(ar);
2641        if (ret) {
2642                ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2643                           ret);
2644                goto err_sleep;
2645        }
2646
2647        ath10k_pci_ce_deinit(ar);
2648        ath10k_pci_irq_disable(ar);
2649
2650        ret = ath10k_pci_init_irq(ar);
2651        if (ret) {
2652                ath10k_err(ar, "failed to init irqs: %d\n", ret);
2653                goto err_free_pipes;
2654        }
2655
2656        ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
2657                    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
2658                    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
2659
2660        ret = ath10k_pci_request_irq(ar);
2661        if (ret) {
2662                ath10k_warn(ar, "failed to request irqs: %d\n", ret);
2663                goto err_deinit_irq;
2664        }
2665
2666        ret = ath10k_pci_chip_reset(ar);
2667        if (ret) {
2668                ath10k_err(ar, "failed to reset chip: %d\n", ret);
2669                goto err_free_irq;
2670        }
2671
2672        chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2673        if (chip_id == 0xffffffff) {
2674                ath10k_err(ar, "failed to get chip id\n");
                    ret = -ENODEV;
2675                goto err_free_irq;
2676        }
2677
2678        if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
2679                ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
2680                           pdev->device, chip_id);
                    ret = -ENODEV;
                    /* irqs are already requested at this point, so don't
                     * skip freeing them by jumping straight to err_sleep */
2681                goto err_free_irq;
2682        }
2683
2684        ath10k_pci_sleep(ar);
2685
2686        ret = ath10k_core_register(ar, chip_id);
2687        if (ret) {
2688                ath10k_err(ar, "failed to register driver core: %d\n", ret);
2689                goto err_free_irq;
2690        }
2691
2692        return 0;
2693
2694err_free_irq:
2695        ath10k_pci_free_irq(ar);
2696        ath10k_pci_kill_tasklet(ar);
2697
2698err_deinit_irq:
2699        ath10k_pci_deinit_irq(ar);
2700
2701err_free_pipes:
2702        ath10k_pci_free_pipes(ar);
2703
2704err_sleep:
2705        ath10k_pci_sleep(ar);
2706
2707err_release:
2708        ath10k_pci_release(ar);
2709
2710err_core_destroy:
2711        ath10k_core_destroy(ar);
2712
2713        return ret;
2714}
2715
2716static void ath10k_pci_remove(struct pci_dev *pdev)
2717{
2718        struct ath10k *ar = pci_get_drvdata(pdev);
2719        struct ath10k_pci *ar_pci;
2720
2721        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
2722
2723        if (!ar)
2724                return;
2725
2726        ar_pci = ath10k_pci_priv(ar);
2727
2728        if (!ar_pci)
2729                return;
2730
2731        ath10k_core_unregister(ar);
2732        ath10k_pci_free_irq(ar);
2733        ath10k_pci_kill_tasklet(ar);
2734        ath10k_pci_deinit_irq(ar);
2735        ath10k_pci_ce_deinit(ar);
2736        ath10k_pci_free_pipes(ar);
2737        ath10k_pci_release(ar);
2738        ath10k_core_destroy(ar);
2739}
2740
2741MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2742
2743static struct pci_driver ath10k_pci_driver = {
2744        .name = "ath10k_pci",
2745        .id_table = ath10k_pci_id_table,
2746        .probe = ath10k_pci_probe,
2747        .remove = ath10k_pci_remove,
2748};
2749
2750static int __init ath10k_pci_init(void)
2751{
2752        int ret;
2753
2754        ret = pci_register_driver(&ath10k_pci_driver);
2755        if (ret)
2756                printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
2757                       ret);
2758
2759        return ret;
2760}
2761module_init(ath10k_pci_init);
2762
2763static void __exit ath10k_pci_exit(void)
2764{
2765        pci_unregister_driver(&ath10k_pci_driver);
2766}
2767
2768module_exit(ath10k_pci_exit);
2769
2770MODULE_AUTHOR("Qualcomm Atheros");
2771MODULE_DESCRIPTION("Driver support for Atheros QCA988X and QCA6174 PCIe devices");
2772MODULE_LICENSE("Dual BSD/GPL");
2773MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2774MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
2775MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
2776MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2777