linux/drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"
#include "coredump.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
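
/* Example (hypothetical values): both parameters can be given at module
 * load time, e.g.
 *
 *      modprobe ath10k_pci irq_mode=2 reset_mode=0
 *
 * and, being registered with mode 0644, can also be read back via
 * /sys/module/ath10k_pci/parameters/.
 */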

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

/* Maximum number of bytes that can be handled atomically by
 * diag read and write.
 */
#define ATH10K_DIAG_TRANSFER_LIMIT      0x5000

#define QCA99X0_PCIE_BAR0_START_REG    0x81030
#define QCA99X0_CPU_MEM_ADDR_REG       0x4d00c
#define QCA99X0_CPU_MEM_DATA_REG       0x4d010

static const struct pci_device_id ath10k_pci_id_table[] = {
        /* PCI-E QCA988X V2 (Ubiquiti branded) */
        { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },

        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
        { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
        { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
        { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
        { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
        { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
        { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
        {0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
        /* QCA988X pre 2.0 chips are not supported because they need some nasty
         * hacks. ath10k doesn't have them and these devices crash horribly
         * because of that.
         */
        { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
        { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

        { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

        { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },

        { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },

        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },

        { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};
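
/* Probe is expected to cross-check the chip id revision read from the
 * device against this table (see the chip id validation later in this
 * file) before continuing initialisation; unknown revisions are rejected.
 */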

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k *ar,
                               struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);

static struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htc_tx_cb,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath10k_pci_htt_htc_rx_cb,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 128,
                .recv_cb = ath10k_pci_htc_rx_cb,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htc_tx_cb,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htt_tx_cb,
        },

        /* CE5: target->host HTT (HIF->HTT) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
                .recv_cb = ath10k_pci_htt_rx_cb,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },

        /* CE8: target->host pktlog */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 128,
                .recv_cb = ath10k_pci_pktlog_rx_cb,
        },

        /* CE9 target autonomous qcache memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE10: target autonomous hif memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE11: target autonomous hif memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },
};

/* Target firmware's Copy Engine configuration. */
static struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = __cpu_to_le32(0),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = __cpu_to_le32(1),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE2: target->host WMI */
        {
                .pipenum = __cpu_to_le32(2),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(64),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE3: host->target WMI */
        {
                .pipenum = __cpu_to_le32(3),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE4: host->target HTT */
        {
                .pipenum = __cpu_to_le32(4),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(256),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: target->host HTT (HIF->HTT) */
        {
                .pipenum = __cpu_to_le32(5),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(512),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = __cpu_to_le32(6),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(4096),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE7 used only by Host */
        {
                .pipenum = __cpu_to_le32(7),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(0),
                .nbytes_max = __cpu_to_le32(0),
                .flags = __cpu_to_le32(0),
                .reserved = __cpu_to_le32(0),
        },

        /* CE8 target->host pktlog */
        {
                .pipenum = __cpu_to_le32(8),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(64),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
                .reserved = __cpu_to_le32(0),
        },

        /* CE9 target autonomous qcache memcpy */
        {
                .pipenum = __cpu_to_le32(9),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
                .reserved = __cpu_to_le32(0),
        },

        /* It is not necessary to send the target wlan configuration for
         * CE10 and CE11 as these CEs are not actively used by the target.
         */
};
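
/* During early boot the driver copies target_ce_config_wlan (and the
 * service map below) into target memory through the diagnostic window,
 * which is why every field above is pre-converted with __cpu_to_le32().
 */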

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(4),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(5),
        },

        /* (Additions here) */

        { /* must be last */
                __cpu_to_le32(0),
                __cpu_to_le32(0),
                __cpu_to_le32(0),
        },
};
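
/* Illustrative sketch (simplified; the driver's real lookup lives in
 * ath10k_pci_hif_map_service_to_pipe() further down this file): resolving
 * the UL and DL pipes for a service amounts to walking the map above up
 * to the all-zero terminator:
 *
 *      const struct service_to_pipe *entry;
 *      int i;
 *
 *      for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
 *              entry = &target_service_to_ce_map_wlan[i];
 *
 *              if (__le32_to_cpu(entry->service_id) != service_id)
 *                      continue;
 *
 *              if (__le32_to_cpu(entry->pipedir) == PIPEDIR_OUT)
 *                      *ul_pipe = __le32_to_cpu(entry->pipenum);
 *              else if (__le32_to_cpu(entry->pipedir) == PIPEDIR_IN)
 *                      *dl_pipe = __le32_to_cpu(entry->pipenum);
 *      }
 */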

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                           RTC_STATE_ADDRESS);

        return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        lockdep_assert_held(&ar_pci->ps_lock);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        iowrite32(PCIE_SOC_WAKE_V_MASK,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        lockdep_assert_held(&ar_pci->ps_lock);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
        ar_pci->ps_awake = false;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
        int tot_delay = 0;
        int curr_delay = 5;

        while (tot_delay < PCIE_WAKE_TIMEOUT) {
                if (ath10k_pci_is_awake(ar)) {
                        if (tot_delay > PCIE_WAKE_LATE_US)
                                ath10k_warn(ar, "device wakeup took %d ms, which is unusually long; otherwise it works normally\n",
                                            tot_delay / 1000);
                        return 0;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }

        return -ETIMEDOUT;
}
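
/* Note: tot_delay accumulates microseconds of udelay() (the per-poll delay
 * ramps from 5 us up to a 50 us cap), so the device gets up to
 * PCIE_WAKE_TIMEOUT microseconds to report RTC_STATE_V_ON before the
 * caller sees -ETIMEDOUT.
 */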

static int ath10k_pci_force_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
        int ret = 0;

        if (ar_pci->pci_ps)
                return ret;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        if (!ar_pci->ps_awake) {
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);

                ret = ath10k_pci_wake_wait(ar);
                if (ret == 0)
                        ar_pci->ps_awake = true;
        }

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

        return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
        ar_pci->ps_awake = false;

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
        int ret = 0;

        if (ar_pci->pci_ps == 0)
                return ret;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        /* This function can be called very frequently. To avoid excessive
         * CPU stalls for MMIO reads use a cache var to hold the device state.
         */
        if (!ar_pci->ps_awake) {
                __ath10k_pci_wake(ar);

                ret = ath10k_pci_wake_wait(ar);
                if (ret == 0)
                        ar_pci->ps_awake = true;
        }

        if (ret == 0) {
                ar_pci->ps_wake_refcount++;
                WARN_ON(ar_pci->ps_wake_refcount == 0);
        }

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

        return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        if (ar_pci->pci_ps == 0)
                return;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        if (WARN_ON(ar_pci->ps_wake_refcount == 0))
                goto skip;

        ar_pci->ps_wake_refcount--;

        mod_timer(&ar_pci->ps_timer, jiffies +
                  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(struct timer_list *t)
{
        struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
        struct ath10k *ar = ar_pci->ar;
        unsigned long flags;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        if (ar_pci->ps_wake_refcount > 0)
                goto skip;

        __ath10k_pci_sleep(ar);

skip:
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
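
/* Sketch of the access pattern the refcounting above enables (this is
 * essentially what ath10k_bus_pci_read32()/ath10k_bus_pci_write32() below
 * do):
 *
 *      ret = ath10k_pci_wake(ar);              // refcount++, wake if asleep
 *      if (ret)
 *              return ret;
 *      val = ioread32(ar_pci->mem + offset);   // device guaranteed awake
 *      ath10k_pci_sleep(ar);                   // refcount--, arm grace timer
 *
 * The device is only put back to sleep by ath10k_pci_ps_timer() once the
 * refcount has stayed at zero for ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
 */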

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        if (ar_pci->pci_ps == 0) {
                ath10k_pci_force_sleep(ar);
                return;
        }

        del_timer_sync(&ar_pci->ps_timer);

        spin_lock_irqsave(&ar_pci->ps_lock, flags);
        WARN_ON(ar_pci->ps_wake_refcount > 0);
        __ath10k_pci_sleep(ar);
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
                ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
                            offset, offset + sizeof(value), ar_pci->mem_len);
                return;
        }

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
                            value, offset, ret);
                return;
        }

        iowrite32(value, ar_pci->mem + offset);
        ath10k_pci_sleep(ar);
}

static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 val;
        int ret;

        if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
                ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
                            offset, offset + sizeof(val), ar_pci->mem_len);
                return 0;
        }

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
                            offset, ret);
                return 0xffffffff;
        }

        val = ioread32(ar_pci->mem + offset);
        ath10k_pci_sleep(ar);

        return val;
}

inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);

        ce->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_ce *ce = ath10k_ce_priv(ar);

        return ce->bus_ops->read32(ar, offset);
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: INTR_CLR register has to be set after
         * INTR_ENABLE is set to 0, otherwise interrupt can not be
         * really cleared.
         */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer.
         */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer.
         */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
                return "msi";

        return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret;

        skb = dev_alloc_skb(pipe->buf_sz);
        if (!skb)
                return -ENOMEM;

        WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

        paddr = dma_map_single(ar->dev, skb->data,
                               skb->len + skb_tailroom(skb),
                               DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ar->dev, paddr))) {
                ath10k_warn(ar, "failed to dma map pci rx buf\n");
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        ATH10K_SKB_RXCB(skb)->paddr = paddr;

        spin_lock_bh(&ce->ce_lock);
        ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
        spin_unlock_bh(&ce->ce_lock);
        if (ret) {
                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        int ret, num;

        if (pipe->buf_sz == 0)
                return;

        if (!ce_pipe->dest_ring)
                return;

        spin_lock_bh(&ce->ce_lock);
        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
        spin_unlock_bh(&ce->ce_lock);

        while (num >= 0) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
                        if (ret == -ENOSPC)
                                break;
                        ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                        mod_timer(&ar_pci->rx_post_retry, jiffies +
                                  ATH10K_PCI_RX_POST_RETRY_MS);
                        break;
                }
                num--;
        }
}

void ath10k_pci_rx_post(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < CE_COUNT; i++)
                ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(struct timer_list *t)
{
        struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
        struct ath10k *ar = ar_pci->ar;

        ath10k_pci_rx_post(ar);
}

static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        u32 val = 0, region = addr & 0xfffff;

        val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
                                 & 0x7ff) << 21;
        val |= 0x100000 | region;
        return val;
}

/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr. Adds support
 * for accessing target space below 1M for qca6174 and qca9377.
 * If the target address is below 1M, bit[20] of the converted CE address
 * is 0; otherwise bit[20] is 1.
 */
static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        u32 val = 0, region = addr & 0xfffff;

        val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
                                 & 0x7ff) << 21;
        val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
        return val;
}
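
/* Worked example (illustrative values): for addr = 0x0009c000 the region
 * is 0x9c000 and, being below 1M, bit[20] stays clear; for
 * addr = 0x00149c00 the region is 0x49c00 and bit[20] is set, giving
 * 0x149c00 in the low 21 bits. Bits [31:21] always carry the CORE_CTRL
 * window value read above.
 */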

static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        u32 val = 0, region = addr & 0xfffff;

        val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
        val |= 0x100000 | region;
        return val;
}

static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
                return -ENOTSUPP;

        return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        int ret = 0;
        u32 *buf;
        unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        spin_lock_bh(&ce->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

        data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
                                                       alloc_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from Target CPU virtual address space
         * to CE address space
         */
        address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

        remaining_bytes = nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
                                            0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag,
                                                            NULL) != 0) {
                        udelay(DIAG_ACCESS_CE_WAIT_US);
                        i += DIAG_ACCESS_CE_WAIT_US;

                        if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag,
                                                            (void **)&buf,
                                                            &completed_nbytes)
                                                                != 0) {
                        udelay(DIAG_ACCESS_CE_WAIT_US);
                        i += DIAG_ACCESS_CE_WAIT_US;

                        if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (*buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                memcpy(data, data_buf, nbytes);

                address += nbytes;
                data += nbytes;
        }

done:

        if (data_buf)
                dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
                                  ce_data_base);

        spin_unlock_bh(&ce->ce_lock);

        return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
        __le32 val = 0;
        int ret;

        ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
        *value = __le32_to_cpu(val);

        return ret;
}
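
/* Usage sketch (hypothetical debug-path caller) for the helper above:
 *
 *      u32 val;
 *      int ret;
 *
 *      ret = ath10k_pci_diag_read32(ar, address, &val);
 *      if (ret)
 *              ath10k_warn(ar, "diag read failed: %d\n", ret);
 */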

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
                                     u32 src, u32 len)
{
        u32 host_addr, addr;
        int ret;

        host_addr = host_interest_item_address(src);

        ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
        if (ret != 0) {
                ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
                            src, ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
        if (ret != 0) {
                ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
                            addr, len, ret);
                return ret;
        }

        return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)             \
        __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                              const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        int ret = 0;
        u32 *buf;
        unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        dma_addr_t ce_data_base = 0;
        int i;

        spin_lock_bh(&ce->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       alloc_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

        remaining_bytes = nbytes;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Copy caller's data to allocated DMA buf */
                memcpy(data_buf, data, nbytes);

                /* Set up to receive directly into Target(!) address */
                ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
                                            nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag,
                                                            NULL) != 0) {
                        udelay(DIAG_ACCESS_CE_WAIT_US);
                        i += DIAG_ACCESS_CE_WAIT_US;

                        if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag,
                                                            (void **)&buf,
                                                            &completed_nbytes)
                                                                != 0) {
                        udelay(DIAG_ACCESS_CE_WAIT_US);
                        i += DIAG_ACCESS_CE_WAIT_US;

                        if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (*buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                data += nbytes;
        }

done:
        if (data_buf) {
                dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
                                  ce_data_base);
        }

        if (ret != 0)
                ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
                            address, ret);

        spin_unlock_bh(&ce->ce_lock);

        return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
        __le32 val = __cpu_to_le32(value);

        return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct sk_buff_head list;
        struct sk_buff *skb;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (skb == NULL)
                        continue;

                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list)))
                ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
                                     void (*callback)(struct ath10k *ar,
                                                      struct sk_buff *skb))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
        unsigned int nbytes, max_nbytes;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &nbytes) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
                           ce_state->id, skb->len);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);

                callback(ar, skb);
        }

        ath10k_pci_rx_post_pipe(pipe_info);
}

static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
                                         void (*callback)(struct ath10k *ar,
                                                          struct sk_buff *skb))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
        unsigned int nbytes, max_nbytes, nentries;
        int orig_len;

        /* No need to acquire ce_lock for CE5, since this is the only place CE5
         * is processed other than init and deinit. Before releasing CE5
         * buffers, interrupts are disabled. Thus CE5 access is serialized.
         */
        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
                                                    &nbytes) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        continue;
                }

                dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                        max_nbytes, DMA_FROM_DEVICE);
                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        nentries = skb_queue_len(&list);
        while ((skb = __skb_dequeue(&list))) {
                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
                           ce_state->id, skb->len);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);

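                /* Deliver the skb, then roll its data and tail pointers
                 * back so the very same buffer can be handed back to the
                 * device below without a fresh allocation.
                 */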
                orig_len = skb->len;
                callback(ar, skb);
                skb_push(skb, orig_len - skb->len);
                skb_reset_tail_pointer(skb);
                skb_trim(skb, 0);

                /* let the device regain ownership of the buffer */
                dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                           skb->len + skb_tailroom(skb),
                                           DMA_FROM_DEVICE);
        }
        ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        /* CE4 polling needs to be done whenever CE pipe which transports
         * HTT Rx (target->host) is processed.
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);

        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        ath10k_pci_process_rx_cb(ce_state,
                                 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct sk_buff *skb;

        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!skb)
                        continue;

                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len, DMA_TO_DEVICE);
                ath10k_htt_hif_tx_complete(ar, skb);
        }
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
        ath10k_htt_t2h_msg_handler(ar, skb);
}

/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        /* CE4 polling needs to be done whenever CE pipe which transports
         * HTT Rx (target->host) is processed.
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);

        ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}

int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                         struct ath10k_hif_sg_item *items, int n_items)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int err, i = 0;

        spin_lock_bh(&ce->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) < n_items)) {
                err = -ENOBUFS;
                goto err;
        }

        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ar, ATH10K_DBG_PCI,
                           "pci tx item %d paddr %pad len %d n_items %d\n",
                           i, &items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                                items[i].vaddr, items[i].len);

                err = ath10k_ce_send_nolock(ce_pipe,
                                            items[i].transfer_context,
                                            items[i].paddr,
                                            items[i].len,
                                            items[i].transfer_id,
                                            CE_SEND_FLAG_GATHER);
                if (err)
                        goto err;
        }

        /* `i` is equal to `n_items - 1` after the loop above */

        ath10k_dbg(ar, ATH10K_DBG_PCI,
                   "pci tx item %d paddr %pad len %d n_items %d\n",
                   i, &items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                        items[i].vaddr, items[i].len);

        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    0);
        if (err)
                goto err;

        spin_unlock_bh(&ce->ce_lock);
        return 0;

err:
        for (; i > 0; i--)
                __ath10k_ce_send_revert(ce_pipe);

        spin_unlock_bh(&ce->ce_lock);
        return err;
}
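
/* Note on the send path above: every item except the last is queued with
 * CE_SEND_FLAG_GATHER and only the final one (flags == 0) completes the
 * transfer; on error the already-queued descriptors are unwound via
 * __ath10k_ce_send_revert().
 */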
1446
1447int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1448                             size_t buf_len)
1449{
1450        return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1451}
1452
1453u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1454{
1455        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1456
1457        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1458
1459        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1460}
1461
1462static void ath10k_pci_dump_registers(struct ath10k *ar,
1463                                      struct ath10k_fw_crash_data *crash_data)
1464{
1465        __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1466        int i, ret;
1467
1468        lockdep_assert_held(&ar->data_lock);
1469
1470        ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1471                                      hi_failure_state,
1472                                      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1473        if (ret) {
1474                ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1475                return;
1476        }
1477
1478        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1479
1480        ath10k_err(ar, "firmware register dump:\n");
1481        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1482                ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1483                           i,
1484                           __le32_to_cpu(reg_dump_values[i]),
1485                           __le32_to_cpu(reg_dump_values[i + 1]),
1486                           __le32_to_cpu(reg_dump_values[i + 2]),
1487                           __le32_to_cpu(reg_dump_values[i + 3]));
1488
1489        if (!crash_data)
1490                return;
1491
1492        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1493                crash_data->registers[i] = reg_dump_values[i];
1494}
1495
1496static int ath10k_pci_dump_memory_section(struct ath10k *ar,
1497                                          const struct ath10k_mem_region *mem_region,
1498                                          u8 *buf, size_t buf_len)
1499{
1500        const struct ath10k_mem_section *cur_section, *next_section;
1501        unsigned int count, section_size, skip_size;
1502        int ret, i, j;
1503
1504        if (!mem_region || !buf)
1505                return 0;
1506
1507        cur_section = &mem_region->section_table.sections[0];
1508
1509        if (mem_region->start > cur_section->start) {
1510                ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
1511                            mem_region->start, cur_section->start);
1512                return 0;
1513        }
1514
1515        skip_size = cur_section->start - mem_region->start;
1516
1517        /* fill the gap between the region's start address and the first
1518         * section's start address
1519         */
1520        for (i = 0; i < skip_size; i++) {
1521                *buf = ATH10K_MAGIC_NOT_COPIED;
1522                buf++;
1523        }
1524
1525        count = 0;
1526
1527        for (i = 0; cur_section != NULL; i++) {
1528                section_size = cur_section->end - cur_section->start;
1529
1530                if (section_size <= 0) {
1531                        ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
1532                                    cur_section->start,
1533                                    cur_section->end);
1534                        break;
1535                }
1536
1537                if ((i + 1) == mem_region->section_table.size) {
1538                        /* last section */
1539                        next_section = NULL;
1540                        skip_size = 0;
1541                } else {
1542                        next_section = cur_section + 1;
1543
1544                        if (cur_section->end > next_section->start) {
1545                                ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
1546                                            next_section->start,
1547                                            cur_section->end);
1548                                break;
1549                        }
1550
1551                        skip_size = next_section->start - cur_section->end;
1552                }
1553
1554                if (buf_len < (skip_size + section_size)) {
1555                        ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
1556                        break;
1557                }
1558
1559                buf_len -= skip_size + section_size;
1560
1561                /* read section to dest memory */
1562                ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
1563                                               buf, section_size);
1564                if (ret) {
1565                        ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
1566                                    cur_section->start, ret);
1567                        break;
1568                }
1569
1570                buf += section_size;
1571                count += section_size;
1572
1573                /* fill in the gap between this section and the next */
1574                for (j = 0; j < skip_size; j++) {
1575                        *buf = ATH10K_MAGIC_NOT_COPIED;
1576                        buf++;
1577                }
1578
1579                count += skip_size;
1580
1581                if (!next_section)
1582                        /* this was the last section */
1583                        break;
1584
1585                cur_section = next_section;
1586        }
1587
1588        return count;
1589}
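
/* Illustration: for a hypothetical region starting at 0x1000 with
 * sections [0x1100, 0x1200) and [0x1400, 0x1500), the walk above
 * produces:
 *
 *   buf[0x000..0x0ff] = ATH10K_MAGIC_NOT_COPIED   (leading gap)
 *   buf[0x100..0x1ff] = target memory 0x1100..0x11ff
 *   buf[0x200..0x3ff] = ATH10K_MAGIC_NOT_COPIED   (inter-section gap)
 *   buf[0x400..0x4ff] = target memory 0x1400..0x14ff
 *
 * and returns count = 0x400: the inter-section gap is counted, the
 * leading gap (filled before the loop) is not.
 */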
1590
1591static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
1592{
1593        u32 val;
1594
1595        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1596                           FW_RAM_CONFIG_ADDRESS, config);
1597
1598        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1599                                FW_RAM_CONFIG_ADDRESS);
1600        if (val != config) {
1601                ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
1602                            val, config);
1603                return -EIO;
1604        }
1605
1606        return 0;
1607}
1608
1609/* returns < 0 if an error happened, otherwise the length */
1610static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
1611                                       const struct ath10k_mem_region *region,
1612                                       u8 *buf)
1613{
1614        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1615        u32 base_addr, i;
1616
1617        base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
1618        base_addr += region->start;
1619
1620        for (i = 0; i < region->len; i += 4) {
1621                iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
1622                *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
1623        }
1624
1625        return region->len;
1626}
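
/* Illustration, not driver code: the SRAM dump above uses a classic
 * indirect-access window: write the target address into an ADDR
 * register, then read the value back through the paired DATA register.
 * The same pattern in isolation (register offsets hypothetical):
 */
static u32 example_indirect_read32(void __iomem *mem, u32 addr_reg,
				   u32 data_reg, u32 addr)
{
	iowrite32(addr, mem + addr_reg);	/* select the word to read */
	return ioread32(mem + data_reg);	/* fetch it */
}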
1627
1628/* returns < 0 if an error happened, otherwise the length */
1629static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
1630                                      const struct ath10k_mem_region *region,
1631                                      u8 *buf)
1632{
1633        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1634        u32 i;
1635
1636        for (i = 0; i < region->len; i += 4)
1637                *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
1638
1639        return region->len;
1640}
1641
1642/* returns < 0 if an error happened, otherwise the length */
1643static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
1644                                          const struct ath10k_mem_region *current_region,
1645                                          u8 *buf)
1646{
1647        int ret;
1648
1649        if (current_region->section_table.size > 0)
1650                /* Copy each section individually. */
1651                return ath10k_pci_dump_memory_section(ar,
1652                                                      current_region,
1653                                                      buf,
1654                                                      current_region->len);
1655
1656        /* No individual memory sections defined, so we can
1657         * copy the entire memory region.
1658         */
1659        ret = ath10k_pci_diag_read_mem(ar,
1660                                       current_region->start,
1661                                       buf,
1662                                       current_region->len);
1663        if (ret) {
1664                ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
1665                            current_region->name, ret);
1666                return ret;
1667        }
1668
1669        return current_region->len;
1670}
1671
1672static void ath10k_pci_dump_memory(struct ath10k *ar,
1673                                   struct ath10k_fw_crash_data *crash_data)
1674{
1675        const struct ath10k_hw_mem_layout *mem_layout;
1676        const struct ath10k_mem_region *current_region;
1677        struct ath10k_dump_ram_data_hdr *hdr;
1678        u32 count, shift;
1679        size_t buf_len;
1680        int ret, i;
1681        u8 *buf;
1682
1683        lockdep_assert_held(&ar->data_lock);
1684
1685        if (!crash_data)
1686                return;
1687
1688        mem_layout = ath10k_coredump_get_mem_layout(ar);
1689        if (!mem_layout)
1690                return;
1691
1692        current_region = &mem_layout->region_table.regions[0];
1693
1694        buf = crash_data->ramdump_buf;
1695        buf_len = crash_data->ramdump_buf_len;
1696
1697        memset(buf, 0, buf_len);
1698
1699        for (i = 0; i < mem_layout->region_table.size; i++) {
1700                count = 0;
1701
1702                if (current_region->len > buf_len) {
1703                        ath10k_warn(ar, "memory region %s size %d is larger than remaining ramdump buffer size %zu\n",
1704                                    current_region->name,
1705                                    current_region->len,
1706                                    buf_len);
1707                        break;
1708                }
1709
1710                /* To get IRAM dump, the host driver needs to switch target
1711                 * ram config from DRAM to IRAM.
1712                 */
1713                if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
1714                    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
1715                        shift = current_region->start >> 20;
1716
1717                        ret = ath10k_pci_set_ram_config(ar, shift);
1718                        if (ret) {
1719                                ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
1720                                            current_region->name, ret);
1721                                break;
1722                        }
1723                }
1724
1725                /* Reserve space for the header. */
1726                hdr = (void *)buf;
1727                buf += sizeof(*hdr);
1728                buf_len -= sizeof(*hdr);
1729
1730                switch (current_region->type) {
1731                case ATH10K_MEM_REGION_TYPE_IOSRAM:
1732                        count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1733                        break;
1734                case ATH10K_MEM_REGION_TYPE_IOREG:
1735                        count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1736                        break;
1737                default:
1738                        ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1739                        if (ret < 0)
1740                                break;
1741
1742                        count = ret;
1743                        break;
1744                }
1745
1746                hdr->region_type = cpu_to_le32(current_region->type);
1747                hdr->start = cpu_to_le32(current_region->start);
1748                hdr->length = cpu_to_le32(count);
1749
1750                if (count == 0)
1751                        /* Note: the header remains, just with zero length. */
1752                        break;
1753
1754                buf += count;
1755                buf_len -= count;
1756
1757                current_region++;
1758        }
1759}
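
/* Illustration, not driver code: the ramdump buffer assembled above is
 * a sequence of (header, payload) records. A consumer could walk it
 * with a sketch like this (a reading of the layout above, not an
 * existing ath10k helper):
 */
static void example_walk_ramdump(const u8 *buf, size_t buf_len)
{
	const struct ath10k_dump_ram_data_hdr *hdr;
	u32 len;

	while (buf_len >= sizeof(*hdr)) {
		hdr = (const void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		len = le32_to_cpu(hdr->length);
		if (len > buf_len)
			break;	/* truncated record */

		/* payload for region hdr->region_type is at buf[0..len) */

		buf += len;
		buf_len -= len;
	}
}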
1760
1761static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1762{
1763        struct ath10k_fw_crash_data *crash_data;
1764        char guid[UUID_STRING_LEN + 1];
1765
1766        spin_lock_bh(&ar->data_lock);
1767
1768        ar->stats.fw_crash_counter++;
1769
1770        crash_data = ath10k_coredump_new(ar);
1771
1772        if (crash_data)
1773                scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1774        else
1775                scnprintf(guid, sizeof(guid), "n/a");
1776
1777        ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1778        ath10k_print_driver_info(ar);
1779        ath10k_pci_dump_registers(ar, crash_data);
1780        ath10k_ce_dump_registers(ar, crash_data);
1781        ath10k_pci_dump_memory(ar, crash_data);
1782
1783        spin_unlock_bh(&ar->data_lock);
1784
1785        queue_work(ar->workqueue, &ar->restart_work);
1786}
1787
1788void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1789                                        int force)
1790{
1791        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1792
1793        if (!force) {
1794                int resources;
1795                /*
1796                 * Decide whether to actually poll for completions, or just
1797                 * wait for a later chance.
1798                 * If there seem to be plenty of resources left, then just wait
1799                 * since checking involves reading a CE register, which is a
1800                 * relatively expensive operation.
1801                 */
1802                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1803
1804                /*
1805                 * If at least 50% of the total resources are still available,
1806                 * don't bother checking again yet.
1807                 */
1808                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1809                        return;
1810        }
1811        ath10k_ce_per_engine_service(ar, pipe);
1812}
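
/* Illustration: if host_ce_config_wlan[pipe].src_nentries were 64, the
 * early return above would skip the (relatively expensive) CE register
 * poll while more than 32 source entries remain free, so completions
 * are only reaped once at least half the ring is in flight or the
 * caller passes force.
 */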
1813
1814static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1815{
1816        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1817
1818        del_timer_sync(&ar_pci->rx_post_retry);
1819}
1820
1821int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1822                                       u8 *ul_pipe, u8 *dl_pipe)
1823{
1824        const struct service_to_pipe *entry;
1825        bool ul_set = false, dl_set = false;
1826        int i;
1827
1828        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1829
1830        for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1831                entry = &target_service_to_ce_map_wlan[i];
1832
1833                if (__le32_to_cpu(entry->service_id) != service_id)
1834                        continue;
1835
1836                switch (__le32_to_cpu(entry->pipedir)) {
1837                case PIPEDIR_NONE:
1838                        break;
1839                case PIPEDIR_IN:
1840                        WARN_ON(dl_set);
1841                        *dl_pipe = __le32_to_cpu(entry->pipenum);
1842                        dl_set = true;
1843                        break;
1844                case PIPEDIR_OUT:
1845                        WARN_ON(ul_set);
1846                        *ul_pipe = __le32_to_cpu(entry->pipenum);
1847                        ul_set = true;
1848                        break;
1849                case PIPEDIR_INOUT:
1850                        WARN_ON(dl_set);
1851                        WARN_ON(ul_set);
1852                        *dl_pipe = __le32_to_cpu(entry->pipenum);
1853                        *ul_pipe = __le32_to_cpu(entry->pipenum);
1854                        dl_set = true;
1855                        ul_set = true;
1856                        break;
1857                }
1858        }
1859
1860        if (!ul_set || !dl_set)
1861                return -ENOENT;
1862
1863        return 0;
1864}
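
/* Illustration, not driver code: typical use of the mapping helper
 * above, resolving one service to its CE pipes (the HTT data service
 * is picked here only as an example):
 */
static int example_lookup_htt_pipes(struct ath10k *ar)
{
	u8 ul_pipe, dl_pipe;
	int ret;

	ret = ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
						 &ul_pipe, &dl_pipe);
	if (ret)
		return ret;	/* -ENOENT: service not in the map */

	/* ul_pipe/dl_pipe now name the host CE pipes for this service */
	return 0;
}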
1865
1866void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1867                                     u8 *ul_pipe, u8 *dl_pipe)
1868{
1869        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1870
1871        (void)ath10k_pci_hif_map_service_to_pipe(ar,
1872                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1873                                                 ul_pipe, dl_pipe);
1874}
1875
1876void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1877{
1878        u32 val;
1879
1880        switch (ar->hw_rev) {
1881        case ATH10K_HW_QCA988X:
1882        case ATH10K_HW_QCA9887:
1883        case ATH10K_HW_QCA6174:
1884        case ATH10K_HW_QCA9377:
1885                val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1886                                        CORE_CTRL_ADDRESS);
1887                val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1888                ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1889                                   CORE_CTRL_ADDRESS, val);
1890                break;
1891        case ATH10K_HW_QCA99X0:
1892        case ATH10K_HW_QCA9984:
1893        case ATH10K_HW_QCA9888:
1894        case ATH10K_HW_QCA4019:
1895                /* TODO: Find appropriate register configuration for QCA99X0
1896                 * to mask irq/MSI.
1897                 */
1898                break;
1899        case ATH10K_HW_WCN3990:
1900                break;
1901        }
1902}
1903
1904static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1905{
1906        u32 val;
1907
1908        switch (ar->hw_rev) {
1909        case ATH10K_HW_QCA988X:
1910        case ATH10K_HW_QCA9887:
1911        case ATH10K_HW_QCA6174:
1912        case ATH10K_HW_QCA9377:
1913                val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1914                                        CORE_CTRL_ADDRESS);
1915                val |= CORE_CTRL_PCIE_REG_31_MASK;
1916                ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1917                                   CORE_CTRL_ADDRESS, val);
1918                break;
1919        case ATH10K_HW_QCA99X0:
1920        case ATH10K_HW_QCA9984:
1921        case ATH10K_HW_QCA9888:
1922        case ATH10K_HW_QCA4019:
1923                /* TODO: Find appropriate register configuration for QCA99X0
1924                 * to unmask irq/MSI.
1925                 */
1926                break;
1927        case ATH10K_HW_WCN3990:
1928                break;
1929        }
1930}
1931
1932static void ath10k_pci_irq_disable(struct ath10k *ar)
1933{
1934        ath10k_ce_disable_interrupts(ar);
1935        ath10k_pci_disable_and_clear_legacy_irq(ar);
1936        ath10k_pci_irq_msi_fw_mask(ar);
1937}
1938
1939static void ath10k_pci_irq_sync(struct ath10k *ar)
1940{
1941        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1942
1943        synchronize_irq(ar_pci->pdev->irq);
1944}
1945
1946static void ath10k_pci_irq_enable(struct ath10k *ar)
1947{
1948        ath10k_ce_enable_interrupts(ar);
1949        ath10k_pci_enable_legacy_irq(ar);
1950        ath10k_pci_irq_msi_fw_unmask(ar);
1951}
1952
1953static int ath10k_pci_hif_start(struct ath10k *ar)
1954{
1955        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1956
1957        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1958
1959        napi_enable(&ar->napi);
1960
1961        ath10k_pci_irq_enable(ar);
1962        ath10k_pci_rx_post(ar);
1963
1964        pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1965                                   ar_pci->link_ctl);
1966
1967        return 0;
1968}
1969
1970static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1971{
1972        struct ath10k *ar;
1973        struct ath10k_ce_pipe *ce_pipe;
1974        struct ath10k_ce_ring *ce_ring;
1975        struct sk_buff *skb;
1976        int i;
1977
1978        ar = pci_pipe->hif_ce_state;
1979        ce_pipe = pci_pipe->ce_hdl;
1980        ce_ring = ce_pipe->dest_ring;
1981
1982        if (!ce_ring)
1983                return;
1984
1985        if (!pci_pipe->buf_sz)
1986                return;
1987
1988        for (i = 0; i < ce_ring->nentries; i++) {
1989                skb = ce_ring->per_transfer_context[i];
1990                if (!skb)
1991                        continue;
1992
1993                ce_ring->per_transfer_context[i] = NULL;
1994
1995                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1996                                 skb->len + skb_tailroom(skb),
1997                                 DMA_FROM_DEVICE);
1998                dev_kfree_skb_any(skb);
1999        }
2000}
2001
2002static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2003{
2004        struct ath10k *ar;
2005        struct ath10k_ce_pipe *ce_pipe;
2006        struct ath10k_ce_ring *ce_ring;
2007        struct sk_buff *skb;
2008        int i;
2009
2010        ar = pci_pipe->hif_ce_state;
2011        ce_pipe = pci_pipe->ce_hdl;
2012        ce_ring = ce_pipe->src_ring;
2013
2014        if (!ce_ring)
2015                return;
2016
2017        if (!pci_pipe->buf_sz)
2018                return;
2019
2020        for (i = 0; i < ce_ring->nentries; i++) {
2021                skb = ce_ring->per_transfer_context[i];
2022                if (!skb)
2023                        continue;
2024
2025                ce_ring->per_transfer_context[i] = NULL;
2026
2027                ath10k_htc_tx_completion_handler(ar, skb);
2028        }
2029}
2030
2031/*
2032 * Cleanup residual buffers for device shutdown:
2033 *    - buffers that were enqueued for receive
2034 *    - buffers that were to be sent
2035 * Note: buffers that had completed but were not yet
2036 * processed are on a completion queue. They are
2037 * handled when the completion thread shuts down.
2038 */
2039static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
2040{
2041        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2042        int pipe_num;
2043
2044        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
2045                struct ath10k_pci_pipe *pipe_info;
2046
2047                pipe_info = &ar_pci->pipe_info[pipe_num];
2048                ath10k_pci_rx_pipe_cleanup(pipe_info);
2049                ath10k_pci_tx_pipe_cleanup(pipe_info);
2050        }
2051}
2052
2053void ath10k_pci_ce_deinit(struct ath10k *ar)
2054{
2055        int i;
2056
2057        for (i = 0; i < CE_COUNT; i++)
2058                ath10k_ce_deinit_pipe(ar, i);
2059}
2060
2061void ath10k_pci_flush(struct ath10k *ar)
2062{
2063        ath10k_pci_rx_retry_sync(ar);
2064        ath10k_pci_buffer_cleanup(ar);
2065}
2066
2067static void ath10k_pci_hif_stop(struct ath10k *ar)
2068{
2069        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2070        unsigned long flags;
2071
2072        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
2073
2074        /* Most likely the device has an HTT Rx ring configured. The only way
2075         * to prevent the device from accessing (and possibly corrupting) host
2076         * memory is to reset the chip now.
2077         *
2078         * There's also no known way of masking MSI interrupts on the device.
2079         * For ranged MSI the CE-related interrupts can be masked. However,
2080         * regardless of how many MSI interrupts are assigned, the first one
2081         * is always used for firmware indications (crashes) and cannot be
2082         * masked. To prevent the device from asserting the interrupt, reset
2083         * it before proceeding with cleanup.
2084         */
2085        ath10k_pci_safe_chip_reset(ar);
2086
2087        ath10k_pci_irq_disable(ar);
2088        ath10k_pci_irq_sync(ar);
2089        napi_synchronize(&ar->napi);
2090        napi_disable(&ar->napi);
2091        ath10k_pci_flush(ar);
2092
2093        spin_lock_irqsave(&ar_pci->ps_lock, flags);
2094        WARN_ON(ar_pci->ps_wake_refcount > 0);
2095        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
2096}
2097
2098int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
2099                                    void *req, u32 req_len,
2100                                    void *resp, u32 *resp_len)
2101{
2102        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2103        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
2104        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
2105        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
2106        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
2107        dma_addr_t req_paddr = 0;
2108        dma_addr_t resp_paddr = 0;
2109        struct bmi_xfer xfer = {};
2110        void *treq, *tresp = NULL;
2111        int ret = 0;
2112
2113        might_sleep();
2114
2115        if (resp && !resp_len)
2116                return -EINVAL;
2117
2118        if (resp && resp_len && *resp_len == 0)
2119                return -EINVAL;
2120
2121        treq = kmemdup(req, req_len, GFP_KERNEL);
2122        if (!treq)
2123                return -ENOMEM;
2124
2125        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
2126        ret = dma_mapping_error(ar->dev, req_paddr);
2127        if (ret) {
2128                ret = -EIO;
2129                goto err_dma;
2130        }
2131
2132        if (resp && resp_len) {
2133                tresp = kzalloc(*resp_len, GFP_KERNEL);
2134                if (!tresp) {
2135                        ret = -ENOMEM;
2136                        goto err_req;
2137                }
2138
2139                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
2140                                            DMA_FROM_DEVICE);
2141                ret = dma_mapping_error(ar->dev, resp_paddr);
2142                if (ret) {
2143                        ret = -EIO;
2144                        goto err_req;
2145                }
2146
2147                xfer.wait_for_resp = true;
2148                xfer.resp_len = 0;
2149
2150                ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
2151        }
2152
2153        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
2154        if (ret)
2155                goto err_resp;
2156
2157        ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
2158        if (ret) {
2159                dma_addr_t unused_buffer;
2160                unsigned int unused_nbytes;
2161                unsigned int unused_id;
2162
2163                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
2164                                           &unused_nbytes, &unused_id);
2165        } else {
2166                /* zero return from the wait means we did not time out */
2167                ret = 0;
2168        }
2169
2170err_resp:
2171        if (resp) {
2172                dma_addr_t unused_buffer;
2173
2174                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
2175                dma_unmap_single(ar->dev, resp_paddr,
2176                                 *resp_len, DMA_FROM_DEVICE);
2177        }
2178err_req:
2179        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
2180
2181        if (ret == 0 && resp_len) {
2182                *resp_len = min(*resp_len, xfer.resp_len);
2183                memcpy(resp, tresp, *resp_len);
2184        }
2185err_dma:
2186        kfree(treq);
2187        kfree(tresp);
2188
2189        return ret;
2190}
2191
2192static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
2193{
2194        struct bmi_xfer *xfer;
2195
2196        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
2197                return;
2198
2199        xfer->tx_done = true;
2200}
2201
2202static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
2203{
2204        struct ath10k *ar = ce_state->ar;
2205        struct bmi_xfer *xfer;
2206        unsigned int nbytes;
2207
2208        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
2209                                          &nbytes))
2210                return;
2211
2212        if (WARN_ON_ONCE(!xfer))
2213                return;
2214
2215        if (!xfer->wait_for_resp) {
2216                ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
2217                return;
2218        }
2219
2220        xfer->resp_len = nbytes;
2221        xfer->rx_done = true;
2222}
2223
2224static int ath10k_pci_bmi_wait(struct ath10k *ar,
2225                               struct ath10k_ce_pipe *tx_pipe,
2226                               struct ath10k_ce_pipe *rx_pipe,
2227                               struct bmi_xfer *xfer)
2228{
2229        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
2230        unsigned long started = jiffies;
2231        unsigned long dur;
2232        int ret;
2233
2234        while (time_before_eq(jiffies, timeout)) {
2235                ath10k_pci_bmi_send_done(tx_pipe);
2236                ath10k_pci_bmi_recv_data(rx_pipe);
2237
2238                if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
2239                        ret = 0;
2240                        goto out;
2241                }
2242
2243                schedule();
2244        }
2245
2246        ret = -ETIMEDOUT;
2247
2248out:
2249        dur = jiffies - started;
2250        if (dur > HZ)
2251                ath10k_dbg(ar, ATH10K_DBG_BMI,
2252                           "bmi cmd took %lu jiffies hz %d ret %d\n",
2253                           dur, HZ, ret);
2254        return ret;
2255}
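
/* Illustration, not driver code: the wait loop above is the common
 * "poll until done or the jiffies deadline passes" idiom, reduced to
 * its bones:
 */
static int example_poll_until(bool (*done)(void *arg), void *arg,
			      unsigned long timeout_hz)
{
	unsigned long timeout = jiffies + timeout_hz;

	while (time_before_eq(jiffies, timeout)) {
		if (done(arg))
			return 0;
		schedule();	/* yield the CPU; caller must be able to sleep */
	}

	return -ETIMEDOUT;
}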
2256
2257/*
2258 * Send an interrupt to the device to wake up the Target CPU
2259 * so it has an opportunity to notice any changed state.
2260 */
2261static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
2262{
2263        u32 addr, val;
2264
2265        addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
2266        val = ath10k_pci_read32(ar, addr);
2267        val |= CORE_CTRL_CPU_INTR_MASK;
2268        ath10k_pci_write32(ar, addr, val);
2269
2270        return 0;
2271}
2272
2273static int ath10k_pci_get_num_banks(struct ath10k *ar)
2274{
2275        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2276
2277        switch (ar_pci->pdev->device) {
2278        case QCA988X_2_0_DEVICE_ID_UBNT:
2279        case QCA988X_2_0_DEVICE_ID:
2280        case QCA99X0_2_0_DEVICE_ID:
2281        case QCA9888_2_0_DEVICE_ID:
2282        case QCA9984_1_0_DEVICE_ID:
2283        case QCA9887_1_0_DEVICE_ID:
2284                return 1;
2285        case QCA6164_2_1_DEVICE_ID:
2286        case QCA6174_2_1_DEVICE_ID:
2287                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
2288                case QCA6174_HW_1_0_CHIP_ID_REV:
2289                case QCA6174_HW_1_1_CHIP_ID_REV:
2290                case QCA6174_HW_2_1_CHIP_ID_REV:
2291                case QCA6174_HW_2_2_CHIP_ID_REV:
2292                        return 3;
2293                case QCA6174_HW_1_3_CHIP_ID_REV:
2294                        return 2;
2295                case QCA6174_HW_3_0_CHIP_ID_REV:
2296                case QCA6174_HW_3_1_CHIP_ID_REV:
2297                case QCA6174_HW_3_2_CHIP_ID_REV:
2298                        return 9;
2299                }
2300                break;
2301        case QCA9377_1_0_DEVICE_ID:
2302                return 9;
2303        }
2304
2305        ath10k_warn(ar, "unknown number of banks, assuming 1\n");
2306        return 1;
2307}
2308
2309static int ath10k_bus_get_num_banks(struct ath10k *ar)
2310{
2311        struct ath10k_ce *ce = ath10k_ce_priv(ar);
2312
2313        return ce->bus_ops->get_num_banks(ar);
2314}
2315
2316int ath10k_pci_init_config(struct ath10k *ar)
2317{
2318        u32 interconnect_targ_addr;
2319        u32 pcie_state_targ_addr = 0;
2320        u32 pipe_cfg_targ_addr = 0;
2321        u32 svc_to_pipe_map = 0;
2322        u32 pcie_config_flags = 0;
2323        u32 ealloc_value;
2324        u32 ealloc_targ_addr;
2325        u32 flag2_value;
2326        u32 flag2_targ_addr;
2327        int ret = 0;
2328
2329        /* Download to Target the CE Config and the service-to-CE map */
2330        interconnect_targ_addr =
2331                host_interest_item_address(HI_ITEM(hi_interconnect_state));
2332
2333        /* Supply Target-side CE configuration */
2334        ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2335                                     &pcie_state_targ_addr);
2336        if (ret != 0) {
2337                ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2338                return ret;
2339        }
2340
2341        if (pcie_state_targ_addr == 0) {
2342                ret = -EIO;
2343                ath10k_err(ar, "Invalid pcie state addr\n");
2344                return ret;
2345        }
2346
2347        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2348                                          offsetof(struct pcie_state,
2349                                                   pipe_cfg_addr)),
2350                                     &pipe_cfg_targ_addr);
2351        if (ret != 0) {
2352                ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2353                return ret;
2354        }
2355
2356        if (pipe_cfg_targ_addr == 0) {
2357                ret = -EIO;
2358                ath10k_err(ar, "Invalid pipe cfg addr\n");
2359                return ret;
2360        }
2361
2362        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2363                                        target_ce_config_wlan,
2364                                        sizeof(struct ce_pipe_config) *
2365                                        NUM_TARGET_CE_CONFIG_WLAN);
2366
2367        if (ret != 0) {
2368                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2369                return ret;
2370        }
2371
2372        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2373                                          offsetof(struct pcie_state,
2374                                                   svc_to_pipe_map)),
2375                                     &svc_to_pipe_map);
2376        if (ret != 0) {
2377                ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2378                return ret;
2379        }
2380
2381        if (svc_to_pipe_map == 0) {
2382                ret = -EIO;
2383                ath10k_err(ar, "Invalid svc_to_pipe map\n");
2384                return ret;
2385        }
2386
2387        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2388                                        target_service_to_ce_map_wlan,
2389                                        sizeof(target_service_to_ce_map_wlan));
2390        if (ret != 0) {
2391                ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2392                return ret;
2393        }
2394
2395        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2396                                          offsetof(struct pcie_state,
2397                                                   config_flags)),
2398                                     &pcie_config_flags);
2399        if (ret != 0) {
2400                ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2401                return ret;
2402        }
2403
2404        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2405
2406        ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2407                                           offsetof(struct pcie_state,
2408                                                    config_flags)),
2409                                      pcie_config_flags);
2410        if (ret != 0) {
2411                ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2412                return ret;
2413        }
2414
2415        /* configure early allocation */
2416        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2417
2418        ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2419        if (ret != 0) {
2420                ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2421                return ret;
2422        }
2423
2424        /* first bank is switched to IRAM */
2425        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2426                         HI_EARLY_ALLOC_MAGIC_MASK);
2427        ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2428                          HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2429                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2430
2431        ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2432        if (ret != 0) {
2433                ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2434                return ret;
2435        }
2436
2437        /* Tell Target to proceed with initialization */
2438        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2439
2440        ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2441        if (ret != 0) {
2442                ath10k_err(ar, "Failed to get option val: %d\n", ret);
2443                return ret;
2444        }
2445
2446        flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2447
2448        ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2449        if (ret != 0) {
2450                ath10k_err(ar, "Failed to set option val: %d\n", ret);
2451                return ret;
2452        }
2453
2454        return 0;
2455}
2456
2457static void ath10k_pci_override_ce_config(struct ath10k *ar)
2458{
2459        struct ce_attr *attr;
2460        struct ce_pipe_config *config;
2461
2462        /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2463         * since it is currently used for another feature.
2464         */
2465
2466        /* Override Host's Copy Engine 5 configuration */
2467        attr = &host_ce_config_wlan[5];
2468        attr->src_sz_max = 0;
2469        attr->dest_nentries = 0;
2470
2471        /* Override Target firmware's Copy Engine configuration */
2472        config = &target_ce_config_wlan[5];
2473        config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2474        config->nbytes_max = __cpu_to_le32(2048);
2475
2476        /* Map from service/endpoint to Copy Engine */
2477        target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2478}
2479
2480int ath10k_pci_alloc_pipes(struct ath10k *ar)
2481{
2482        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2483        struct ath10k_pci_pipe *pipe;
2484        struct ath10k_ce *ce = ath10k_ce_priv(ar);
2485        int i, ret;
2486
2487        for (i = 0; i < CE_COUNT; i++) {
2488                pipe = &ar_pci->pipe_info[i];
2489                pipe->ce_hdl = &ce->ce_states[i];
2490                pipe->pipe_num = i;
2491                pipe->hif_ce_state = ar;
2492
2493                ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
2494                if (ret) {
2495                        ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2496                                   i, ret);
2497                        return ret;
2498                }
2499
2500                /* Last CE is Diagnostic Window */
2501                if (i == CE_DIAG_PIPE) {
2502                        ar_pci->ce_diag = pipe->ce_hdl;
2503                        continue;
2504                }
2505
2506                pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
2507        }
2508
2509        return 0;
2510}
2511
2512void ath10k_pci_free_pipes(struct ath10k *ar)
2513{
2514        int i;
2515
2516        for (i = 0; i < CE_COUNT; i++)
2517                ath10k_ce_free_pipe(ar, i);
2518}
2519
2520int ath10k_pci_init_pipes(struct ath10k *ar)
2521{
2522        int i, ret;
2523
2524        for (i = 0; i < CE_COUNT; i++) {
2525                ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
2526                if (ret) {
2527                        ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2528                                   i, ret);
2529                        return ret;
2530                }
2531        }
2532
2533        return 0;
2534}
2535
2536static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2537{
2538        return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2539               FW_IND_EVENT_PENDING;
2540}
2541
2542static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2543{
2544        u32 val;
2545
2546        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2547        val &= ~FW_IND_EVENT_PENDING;
2548        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2549}
2550
2551static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2552{
2553        u32 val;
2554
2555        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2556        return (val == 0xffffffff);
2557}
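
/* Illustration: a PCI MMIO read returning 0xffffffff is the canonical
 * sign that the endpoint stopped responding (surprise removal, link
 * down), since reads to a vanished device complete as all-ones on the
 * host. The check above assumes FW_INDICATOR_ADDRESS never reads back
 * as all-ones on a healthy device.
 */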
2558
2559/* this function effectively clears the target memory controller assert line */
2560static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2561{
2562        u32 val;
2563
2564        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2565        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2566                               val | SOC_RESET_CONTROL_SI0_RST_MASK);
2567        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2568
2569        msleep(10);
2570
2571        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2572        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2573                               val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2574        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2575
2576        msleep(10);
2577}
2578
2579static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2580{
2581        u32 val;
2582
2583        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2584
2585        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2586                                SOC_RESET_CONTROL_ADDRESS);
2587        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2588                           val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2589}
2590
2591static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2592{
2593        u32 val;
2594
2595        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2596                                SOC_RESET_CONTROL_ADDRESS);
2597
2598        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2599                           val | SOC_RESET_CONTROL_CE_RST_MASK);
2600        msleep(10);
2601        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2602                           val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2603}
2604
2605static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2606{
2607        u32 val;
2608
2609        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2610                                SOC_LF_TIMER_CONTROL0_ADDRESS);
2611        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2612                           SOC_LF_TIMER_CONTROL0_ADDRESS,
2613                           val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2614}
2615
2616static int ath10k_pci_warm_reset(struct ath10k *ar)
2617{
2618        int ret;
2619
2620        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2621
2622        spin_lock_bh(&ar->data_lock);
2623        ar->stats.fw_warm_reset_counter++;
2624        spin_unlock_bh(&ar->data_lock);
2625
2626        ath10k_pci_irq_disable(ar);
2627
2628        /* Make sure the target CPU is not doing anything dangerous, e.g. if it
2629         * were to access copy engine while host performs copy engine reset
2630         * then it is possible for the device to confuse pci-e controller to
2631         * the point of bringing host system to a complete stop (i.e. hang).
2632         */
2633        ath10k_pci_warm_reset_si0(ar);
2634        ath10k_pci_warm_reset_cpu(ar);
2635        ath10k_pci_init_pipes(ar);
2636        ath10k_pci_wait_for_target_init(ar);
2637
2638        ath10k_pci_warm_reset_clear_lf(ar);
2639        ath10k_pci_warm_reset_ce(ar);
2640        ath10k_pci_warm_reset_cpu(ar);
2641        ath10k_pci_init_pipes(ar);
2642
2643        ret = ath10k_pci_wait_for_target_init(ar);
2644        if (ret) {
2645                ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2646                return ret;
2647        }
2648
2649        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2650
2651        return 0;
2652}
2653
2654static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2655{
2656        ath10k_pci_irq_disable(ar);
2657        return ath10k_pci_qca99x0_chip_reset(ar);
2658}
2659
2660static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2661{
2662        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2663
2664        if (!ar_pci->pci_soft_reset)
2665                return -ENOTSUPP;
2666
2667        return ar_pci->pci_soft_reset(ar);
2668}
2669
2670static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2671{
2672        int i, ret;
2673        u32 val;
2674
2675        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2676
2677        /* Some hardware revisions (e.g. CUS223v2) have issues with cold
2678         * reset. It is thus preferred to use warm reset, which is safer but
2679         * may not be able to recover the device from all possible failure
2680         * scenarios.
2681         *
2682         * Warm reset doesn't always work on the first try so attempt it a
2683         * few times before giving up.
2684         */
2684        for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2685                ret = ath10k_pci_warm_reset(ar);
2686                if (ret) {
2687                        ath10k_warn(ar, "warm reset attempt %d of %d failed: %d\n",
2688                                    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2689                                    ret);
2690                        continue;
2691                }
2692
2693                /* FIXME: Sometimes copy engine doesn't recover after warm
2694                 * reset. In most cases this needs cold reset. In some of these
2695                 * cases the device is in such a state that a cold reset may
2696                 * lock up the host.
2697                 *
2698                 * Reading any host interest register via copy engine is
2699                 * sufficient to verify if device is capable of booting
2700                 * firmware blob.
2701                 */
2702                ret = ath10k_pci_init_pipes(ar);
2703                if (ret) {
2704                        ath10k_warn(ar, "failed to init copy engine: %d\n",
2705                                    ret);
2706                        continue;
2707                }
2708
2709                ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2710                                             &val);
2711                if (ret) {
2712                        ath10k_warn(ar, "failed to poke copy engine: %d\n",
2713                                    ret);
2714                        continue;
2715                }
2716
2717                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2718                return 0;
2719        }
2720
2721        if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2722                ath10k_warn(ar, "refusing cold reset as requested\n");
2723                return -EPERM;
2724        }
2725
2726        ret = ath10k_pci_cold_reset(ar);
2727        if (ret) {
2728                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2729                return ret;
2730        }
2731
2732        ret = ath10k_pci_wait_for_target_init(ar);
2733        if (ret) {
2734                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2735                            ret);
2736                return ret;
2737        }
2738
2739        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2740
2741        return 0;
2742}
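
/* Illustration, not driver code: the reset policy above is a bounded
 * retry of the safer path with a riskier fallback, schematically:
 */
static int example_reset_policy(int (*warm)(struct ath10k *ar),
				int (*cold)(struct ath10k *ar),
				struct ath10k *ar, int attempts,
				bool allow_cold)
{
	int i;

	for (i = 0; i < attempts; i++)
		if (warm(ar) == 0)
			return 0;

	if (!allow_cold)
		return -EPERM;

	return cold(ar);
}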
2743
2744static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2745{
2746        int ret;
2747
2748        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2749
2750        /* FIXME: QCA6174 requires cold + warm reset to work. */
2751
2752        ret = ath10k_pci_cold_reset(ar);
2753        if (ret) {
2754                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2755                return ret;
2756        }
2757
2758        ret = ath10k_pci_wait_for_target_init(ar);
2759        if (ret) {
2760                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2761                            ret);
2762                return ret;
2763        }
2764
2765        ret = ath10k_pci_warm_reset(ar);
2766        if (ret) {
2767                ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2768                return ret;
2769        }
2770
2771        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2772
2773        return 0;
2774}
2775
2776static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2777{
2778        int ret;
2779
2780        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2781
2782        ret = ath10k_pci_cold_reset(ar);
2783        if (ret) {
2784                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2785                return ret;
2786        }
2787
2788        ret = ath10k_pci_wait_for_target_init(ar);
2789        if (ret) {
2790                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2791                            ret);
2792                return ret;
2793        }
2794
2795        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2796
2797        return 0;
2798}
2799
2800static int ath10k_pci_chip_reset(struct ath10k *ar)
2801{
2802        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2803
2804        if (WARN_ON(!ar_pci->pci_hard_reset))
2805                return -ENOTSUPP;
2806
2807        return ar_pci->pci_hard_reset(ar);
2808}
2809
2810static int ath10k_pci_hif_power_up(struct ath10k *ar)
2811{
2812        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2813        int ret;
2814
2815        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2816
2817        pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2818                                  &ar_pci->link_ctl);
2819        pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2820                                   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2821
2822        /*
2823         * Bring the target up cleanly.
2824         *
2825         * The target may be in an undefined state with an AUX-powered Target
2826         * and a Host in WoW mode. If the Host crashes, loses power, or is
2827         * restarted (without unloading the driver) then the Target is left
2828         * (aux) powered and running. On a subsequent driver load, the Target
2829         * is in an unexpected state. We try to catch that here in order to
2830         * reset the Target and retry the probe.
2831         */
2832        ret = ath10k_pci_chip_reset(ar);
2833        if (ret) {
2834                if (ath10k_pci_has_fw_crashed(ar)) {
2835                        ath10k_warn(ar, "firmware crashed during chip reset\n");
2836                        ath10k_pci_fw_crashed_clear(ar);
2837                        ath10k_pci_fw_crashed_dump(ar);
2838                }
2839
2840                ath10k_err(ar, "failed to reset chip: %d\n", ret);
2841                goto err_sleep;
2842        }
2843
2844        ret = ath10k_pci_init_pipes(ar);
2845        if (ret) {
2846                ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2847                goto err_sleep;
2848        }
2849
2850        ret = ath10k_pci_init_config(ar);
2851        if (ret) {
2852                ath10k_err(ar, "failed to setup init config: %d\n", ret);
2853                goto err_ce;
2854        }
2855
2856        ret = ath10k_pci_wake_target_cpu(ar);
2857        if (ret) {
2858                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2859                goto err_ce;
2860        }
2861
2862        return 0;
2863
2864err_ce:
2865        ath10k_pci_ce_deinit(ar);
2866
2867err_sleep:
2868        return ret;
2869}
2870
2871void ath10k_pci_hif_power_down(struct ath10k *ar)
2872{
2873        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2874
2875        /* Currently hif_power_up effectively performs a reset and hif_stop
2876         * resets the chip as well, so there's no point in resetting here.
2877         */
2878}
2879
2880static int ath10k_pci_hif_suspend(struct ath10k *ar)
2881{
2882        /* Nothing to do; the important stuff is in the driver suspend. */
2883        return 0;
2884}
2885
2886static int ath10k_pci_suspend(struct ath10k *ar)
2887{
2888        /* The grace timer can still be counting down and ar->ps_awake may
2889         * still be true. It is known that the device may be asleep after
2890         * resuming regardless of the SoC powersave state before suspending.
2891         * Hence make sure the device is asleep before proceeding.
2892         */
2893        ath10k_pci_sleep_sync(ar);
2894
2895        return 0;
2896}
2897
2898static int ath10k_pci_hif_resume(struct ath10k *ar)
2899{
2900        /* Nothing to do; the important stuff is in the driver resume. */
2901        return 0;
2902}
2903
2904static int ath10k_pci_resume(struct ath10k *ar)
2905{
2906        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2907        struct pci_dev *pdev = ar_pci->pdev;
2908        u32 val;
2909        int ret = 0;
2910
2911        ret = ath10k_pci_force_wake(ar);
2912        if (ret) {
2913                ath10k_err(ar, "failed to wake up target: %d\n", ret);
2914                return ret;
2915        }
2916
2917        /* Suspend/Resume resets the PCI configuration space, so we have to
2918         * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2919         * from interfering with C3 CPU state. pci_restore_state() won't help
2920         * here since it only restores the first 64 bytes of the config header.
2921         */
2922        pci_read_config_dword(pdev, 0x40, &val);
2923        if ((val & 0x0000ff00) != 0)
2924                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2925
2926        return ret;
2927}
2928
2929static bool ath10k_pci_validate_cal(void *data, size_t size)
2930{
2931        __le16 *cal_words = data;
2932        u16 checksum = 0;
2933        size_t i;
2934
2935        if (size % 2 != 0)
2936                return false;
2937
2938        for (i = 0; i < size / 2; i++)
2939                checksum ^= le16_to_cpu(cal_words[i]);
2940
2941        return checksum == 0xffff;
2942}
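
/* Illustration: the image is accepted when the XOR of all 16-bit
 * little-endian words equals 0xffff. E.g. a toy 6-byte image holding
 * the words 0x1234 and 0xaaaa checks out only if its third word is
 * 0x1234 ^ 0xaaaa ^ 0xffff = 0x4761, since
 * 0x1234 ^ 0xaaaa ^ 0x4761 == 0xffff.
 */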
2943
2944static void ath10k_pci_enable_eeprom(struct ath10k *ar)
2945{
2946        /* Enable SI clock */
2947        ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2948
2949        /* Configure GPIOs for I2C operation */
2950        ath10k_pci_write32(ar,
2951                           GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2952                           4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
2953                           SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
2954                              GPIO_PIN0_CONFIG) |
2955                           SM(1, GPIO_PIN0_PAD_PULL));
2956
2957        ath10k_pci_write32(ar,
2958                           GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2959                           4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
2960                           SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
2961                           SM(1, GPIO_PIN0_PAD_PULL));
2962
2963        ath10k_pci_write32(ar,
2964                           GPIO_BASE_ADDRESS +
2965                           QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
2966                           1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
2967
2968        /* In Swift ASIC - EEPROM clock will be (110 MHz / 512) = 214 kHz */
2969        ath10k_pci_write32(ar,
2970                           SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
2971                           SM(1, SI_CONFIG_ERR_INT) |
2972                           SM(1, SI_CONFIG_BIDIR_OD_DATA) |
2973                           SM(1, SI_CONFIG_I2C) |
2974                           SM(1, SI_CONFIG_POS_SAMPLE) |
2975                           SM(1, SI_CONFIG_INACTIVE_DATA) |
2976                           SM(1, SI_CONFIG_INACTIVE_CLK) |
2977                           SM(8, SI_CONFIG_DIVIDER));
2978}
2979
2980static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
2981{
2982        u32 reg;
2983        int wait_limit;
2984
2985        /* set the device select byte and the read operation */
2986        reg = QCA9887_EEPROM_SELECT_READ |
2987              SM(addr, QCA9887_EEPROM_ADDR_LO) |
2988              SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
2989        ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2990
2991        /* write transmit data, transfer length, and START bit */
2992        ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
2993                           SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
2994                           SM(4, SI_CS_TX_CNT));
2995
2996        /* wait max 1 sec */
2997        wait_limit = 100000;
2998
2999        /* wait for SI_CS_DONE_INT */
3000        do {
3001                reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
3002                if (MS(reg, SI_CS_DONE_INT))
3003                        break;
3004
3005                wait_limit--;
3006                udelay(10);
3007        } while (wait_limit > 0);
3008
3009        if (!MS(reg, SI_CS_DONE_INT)) {
3010                ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
3011                           addr);
3012                return -ETIMEDOUT;
3013        }
3014
3015        /* clear SI_CS_DONE_INT */
3016        ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
3017
3018        if (MS(reg, SI_CS_DONE_ERR)) {
3019                ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
3020                return -EIO;
3021        }
3022
3023        /* extract receive data */
3024        reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
3025        *out = reg;
3026
3027        return 0;
3028}
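/* Editorial usage sketch (hypothetical helper, not in the driver):
 * reading a big-endian 16-bit value one byte at a time via
 * ath10k_pci_read_eeprom(). Assumes the EEPROM interface was already
 * set up with ath10k_pci_enable_eeprom().
 */
static int ath10k_pci_read_eeprom_be16(struct ath10k *ar, u16 addr, u16 *out)
{
        u8 hi, lo;
        int ret;

        ret = ath10k_pci_read_eeprom(ar, addr, &hi);
        if (ret)
                return ret;

        ret = ath10k_pci_read_eeprom(ar, addr + 1, &lo);
        if (ret)
                return ret;

        *out = ((u16)hi << 8) | lo;
        return 0;
}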
3029
3030static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
3031                                           size_t *data_len)
3032{
3033        u8 *caldata = NULL;
3034        size_t calsize, i;
3035        int ret;
3036
3037        if (!QCA_REV_9887(ar))
3038                return -EOPNOTSUPP;
3039
3040        calsize = ar->hw_params.cal_data_len;
3041        caldata = kmalloc(calsize, GFP_KERNEL);
3042        if (!caldata)
3043                return -ENOMEM;
3044
3045        ath10k_pci_enable_eeprom(ar);
3046
3047        for (i = 0; i < calsize; i++) {
3048                ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
3049                if (ret)
3050                        goto err_free;
3051        }
3052
3053        if (!ath10k_pci_validate_cal(caldata, calsize))
3054                goto err_free;
3055
3056        *data = caldata;
3057        *data_len = calsize;
3058
3059        return 0;
3060
3061err_free:
3062        kfree(caldata);
3063
3064        return -EINVAL;
3065}
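/* Editorial usage sketch: callers reach the function above through the
 * HIF op table below, e.g. via the ath10k_hif_fetch_cal_eeprom() wrapper
 * from hif.h. The consuming function here is hypothetical.
 */
static int ath10k_load_cal_sketch(struct ath10k *ar)
{
        void *data;
        size_t len;
        int ret;

        ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &len);
        if (ret)
                return ret;

        /* ... hand the calibration blob to the firmware loader ... */
        kfree(data);
        return 0;
}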
3066
3067static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
3068        .tx_sg                  = ath10k_pci_hif_tx_sg,
3069        .diag_read              = ath10k_pci_hif_diag_read,
3070        .diag_write             = ath10k_pci_diag_write_mem,
3071        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
3072        .start                  = ath10k_pci_hif_start,
3073        .stop                   = ath10k_pci_hif_stop,
3074        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
3075        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
3076        .send_complete_check    = ath10k_pci_hif_send_complete_check,
3077        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
3078        .power_up               = ath10k_pci_hif_power_up,
3079        .power_down             = ath10k_pci_hif_power_down,
3080        .read32                 = ath10k_pci_read32,
3081        .write32                = ath10k_pci_write32,
3082        .suspend                = ath10k_pci_hif_suspend,
3083        .resume                 = ath10k_pci_hif_resume,
3084        .fetch_cal_eeprom       = ath10k_pci_hif_fetch_cal_eeprom,
3085};
3086
3087/*
3088 * Top-level interrupt handler for all PCI interrupts from a Target.
3089 * When a block of MSI interrupts is allocated, this top-level handler
3090 * is not used; instead, we directly call the correct sub-handler.
3091 */
3092static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
3093{
3094        struct ath10k *ar = arg;
3095        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3096        int ret;
3097
3098        if (ath10k_pci_has_device_gone(ar))
3099                return IRQ_NONE;
3100
3101        ret = ath10k_pci_force_wake(ar);
3102        if (ret) {
3103                ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
3104                return IRQ_NONE;
3105        }
3106
3107        if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
3108            !ath10k_pci_irq_pending(ar))
3109                return IRQ_NONE;
3110
3111        ath10k_pci_disable_and_clear_legacy_irq(ar);
3112        ath10k_pci_irq_msi_fw_mask(ar);
3113        napi_schedule(&ar->napi);
3114
3115        return IRQ_HANDLED;
3116}
3117
3118static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
3119{
3120        struct ath10k *ar = container_of(ctx, struct ath10k, napi);
3121        int done = 0;
3122
3123        if (ath10k_pci_has_fw_crashed(ar)) {
3124                ath10k_pci_fw_crashed_clear(ar);
3125                ath10k_pci_fw_crashed_dump(ar);
3126                napi_complete(ctx);
3127                return done;
3128        }
3129
3130        ath10k_ce_per_engine_service_any(ar);
3131
3132        done = ath10k_htt_txrx_compl_task(ar, budget);
3133
3134        if (done < budget) {
3135                napi_complete_done(ctx, done);
3136                /* In case of MSI, it is possible that interrupts are received
3137                 * while NAPI poll is in progress. Pending interrupts that
3138                 * arrive after NAPI has already serviced all copy engine
3139                 * pipes would then never be handled, which was seen to stop
3140                 * the boot sequence from completing on x86 platforms. So it
3141                 * is safer to check for and service any pending interrupts
3142                 * before re-enabling them.
3143                 */
3144                if (ath10k_ce_interrupt_summary(ar)) {
3145                        napi_reschedule(ctx);
3146                        goto out;
3147                }
3148                ath10k_pci_enable_legacy_irq(ar);
3149                ath10k_pci_irq_msi_fw_unmask(ar);
3150        }
3151
3152out:
3153        return done;
3154}
3155
3156static int ath10k_pci_request_irq_msi(struct ath10k *ar)
3157{
3158        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3159        int ret;
3160
3161        ret = request_irq(ar_pci->pdev->irq,
3162                          ath10k_pci_interrupt_handler,
3163                          IRQF_SHARED, "ath10k_pci", ar);
3164        if (ret) {
3165                ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
3166                            ar_pci->pdev->irq, ret);
3167                return ret;
3168        }
3169
3170        return 0;
3171}
3172
3173static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
3174{
3175        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3176        int ret;
3177
3178        ret = request_irq(ar_pci->pdev->irq,
3179                          ath10k_pci_interrupt_handler,
3180                          IRQF_SHARED, "ath10k_pci", ar);
3181        if (ret) {
3182                ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
3183                            ar_pci->pdev->irq, ret);
3184                return ret;
3185        }
3186
3187        return 0;
3188}
3189
3190static int ath10k_pci_request_irq(struct ath10k *ar)
3191{
3192        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3193
3194        switch (ar_pci->oper_irq_mode) {
3195        case ATH10K_PCI_IRQ_LEGACY:
3196                return ath10k_pci_request_irq_legacy(ar);
3197        case ATH10K_PCI_IRQ_MSI:
3198                return ath10k_pci_request_irq_msi(ar);
3199        default:
3200                return -EINVAL;
3201        }
3202}
3203
3204static void ath10k_pci_free_irq(struct ath10k *ar)
3205{
3206        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3207
3208        free_irq(ar_pci->pdev->irq, ar);
3209}
3210
3211void ath10k_pci_init_napi(struct ath10k *ar)
3212{
3213        netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
3214                       ATH10K_NAPI_BUDGET);
3215}
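/* Editorial note: this is the older netif_napi_add() signature that
 * still takes an explicit weight argument (ATH10K_NAPI_BUDGET); later
 * kernels dropped that parameter from netif_napi_add().
 */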
3216
3217static int ath10k_pci_init_irq(struct ath10k *ar)
3218{
3219        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3220        int ret;
3221
3222        ath10k_pci_init_napi(ar);
3223
3224        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
3225                ath10k_info(ar, "limiting irq mode to: %d\n",
3226                            ath10k_pci_irq_mode);
3227
3228        /* Try MSI */
3229        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
3230                ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
3231                ret = pci_enable_msi(ar_pci->pdev);
3232                if (ret == 0)
3233                        return 0;
3234
3235                /* fall-through */
3236        }
3237
3238        /* Try legacy irq
3239         *
3240         * A potential race exists here: the CORE_BASE write below
3241         * depends on the target correctly decoding the AXI address,
3242         * but the host cannot know when the target has written BAR to
3243         * CORE_CTRL, so the write may be lost if the target has NOT
3244         * yet done so. For now, mitigate the race by repeating the
3245         * write during the sync check in ath10k_pci_wait_for_target_init().
3246         */
3247        ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
3248
3249        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3250                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3251
3252        return 0;
3253}
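/* Editorial sketch: on newer kernels the MSI-then-legacy policy above
 * can be expressed with pci_alloc_irq_vectors(); an equivalent-shaped
 * example follows, not how this driver is actually wired up.
 */
static int ath10k_pci_init_irq_sketch(struct pci_dev *pdev)
{
        int ret;

        /* try a single MSI vector first, fall back to legacy INTx */
        ret = pci_alloc_irq_vectors(pdev, 1, 1,
                                    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
        if (ret < 0)
                return ret;

        /* pci_irq_vector(pdev, 0) then yields the Linux irq number */
        return 0;
}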
3254
3255static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
3256{
3257        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3258                           0);
3259}
3260
3261static int ath10k_pci_deinit_irq(struct ath10k *ar)
3262{
3263        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3264
3265        switch (ar_pci->oper_irq_mode) {
3266        case ATH10K_PCI_IRQ_LEGACY:
3267                ath10k_pci_deinit_irq_legacy(ar);
3268                break;
3269        default:
3270                pci_disable_msi(ar_pci->pdev);
3271                break;
3272        }
3273
3274        return 0;
3275}
3276
3277int ath10k_pci_wait_for_target_init(struct ath10k *ar)
3278{
3279        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3280        unsigned long timeout;
3281        u32 val;
3282
3283        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
3284
3285        timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
3286
3287        do {
3288                val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
3289
3290                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
3291                           val);
3292
3293                /* target should never return this */
3294                if (val == 0xffffffff)
3295                        continue;
3296
3297                /* the device has crashed so don't bother trying anymore */
3298                if (val & FW_IND_EVENT_PENDING)
3299                        break;
3300
3301                if (val & FW_IND_INITIALIZED)
3302                        break;
3303
3304                if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
3305                        /* Fix potential race by repeating CORE_BASE writes */
3306                        ath10k_pci_enable_legacy_irq(ar);
3307
3308                mdelay(10);
3309        } while (time_before(jiffies, timeout));
3310
3311        ath10k_pci_disable_and_clear_legacy_irq(ar);
3312        ath10k_pci_irq_msi_fw_mask(ar);
3313
3314        if (val == 0xffffffff) {
3315                ath10k_err(ar, "failed to read device register, device is gone\n");
3316                return -EIO;
3317        }
3318
3319        if (val & FW_IND_EVENT_PENDING) {
3320                ath10k_warn(ar, "device has crashed during init\n");
3321                return -ECOMM;
3322        }
3323
3324        if (!(val & FW_IND_INITIALIZED)) {
3325                ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
3326                           val);
3327                return -ETIMEDOUT;
3328        }
3329
3330        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
3331        return 0;
3332}
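/* Editorial sketch: the FW_INDICATOR polling loop above, condensed with
 * the generic read_poll_timeout() helper from <linux/iopoll.h> available
 * on newer kernels. Shown only to make the exit conditions explicit; it
 * omits the legacy-irq re-enable workaround done inside the real loop,
 * and callers would still need to read the indicator afterwards to tell
 * a crash (FW_IND_EVENT_PENDING) from successful init.
 */
static int ath10k_pci_poll_fw_indicator_sketch(struct ath10k *ar)
{
        u32 val;

        return read_poll_timeout(ath10k_pci_read32, val,
                                 val != 0xffffffff &&
                                 (val & (FW_IND_EVENT_PENDING |
                                         FW_IND_INITIALIZED)),
                                 10 * USEC_PER_MSEC,
                                 ATH10K_PCI_TARGET_WAIT * USEC_PER_MSEC,
                                 false, ar, FW_INDICATOR_ADDRESS);
}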
3333
3334static int ath10k_pci_cold_reset(struct ath10k *ar)
3335{
3336        u32 val;
3337
3338        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
3339
3340        spin_lock_bh(&ar->data_lock);
3341
3342        ar->stats.fw_cold_reset_counter++;
3343
3344        spin_unlock_bh(&ar->data_lock);
3345
3346        /* Put Target, including PCIe, into RESET. */
3347        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
3348        val |= 1;
3349        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3350
3351        /* After writing SOC_GLOBAL_RESET to put the device into reset,
3352         * and again when pulling it out of reset, PCIe may be unstable
3353         * for a while: immediate register accesses can cause bus errors.
3354         * Delay before any PCIe access to avoid this.
3355         */
3356        msleep(20);
3357
3358        /* Pull Target, including PCIe, out of RESET. */
3359        val &= ~1;
3360        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3361
3362        msleep(20);
3363
3364        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
3365
3366        return 0;
3367}
3368
3369static int ath10k_pci_claim(struct ath10k *ar)
3370{
3371        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3372        struct pci_dev *pdev = ar_pci->pdev;
3373        int ret;
3374
3375        pci_set_drvdata(pdev, ar);
3376
3377        ret = pci_enable_device(pdev);
3378        if (ret) {
3379                ath10k_err(ar, "failed to enable pci device: %d\n", ret);
3380                return ret;
3381        }
3382
3383        ret = pci_request_region(pdev, BAR_NUM, "ath");
3384        if (ret) {
3385                ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
3386                           ret);
3387                goto err_device;
3388        }
3389
3390        /* Target expects 32 bit DMA. Enforce it. */
3391        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3392        if (ret) {
3393                ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
3394                goto err_region;
3395        }
3396
3397        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3398        if (ret) {
3399                ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
3400                           ret);
3401                goto err_region;
3402        }
3403
3404        pci_set_master(pdev);
3405
3406        /* Arrange for access to Target SoC registers. */
3407        ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
3408        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
3409        if (!ar_pci->mem) {
3410                ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
3411                ret = -EIO;
3412                goto err_master;
3413        }
3414
3415        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
3416        return 0;
3417
3418err_master:
3419        pci_clear_master(pdev);
3420
3421err_region:
3422        pci_release_region(pdev, BAR_NUM);
3423
3424err_device:
3425        pci_disable_device(pdev);
3426
3427        return ret;
3428}
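/* Editorial note: the pci_set_dma_mask()/pci_set_consistent_dma_mask()
 * pair above was later folded upstream into a single generic DMA API
 * call, roughly:
 *
 *      ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *      if (ret)
 *              goto err_region;
 */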
3429
3430static void ath10k_pci_release(struct ath10k *ar)
3431{
3432        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3433        struct pci_dev *pdev = ar_pci->pdev;
3434
3435        pci_iounmap(pdev, ar_pci->mem);
3436        pci_release_region(pdev, BAR_NUM);
3437        pci_clear_master(pdev);
3438        pci_disable_device(pdev);
3439}
3440
3441static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3442{
3443        const struct ath10k_pci_supp_chip *supp_chip;
3444        int i;
3445        u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3446
3447        for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3448                supp_chip = &ath10k_pci_supp_chips[i];
3449
3450                if (supp_chip->dev_id == dev_id &&
3451                    supp_chip->rev_id == rev_id)
3452                        return true;
3453        }
3454
3455        return false;
3456}
3457
3458int ath10k_pci_setup_resource(struct ath10k *ar)
3459{
3460        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3461        struct ath10k_ce *ce = ath10k_ce_priv(ar);
3462        int ret;
3463
3464        spin_lock_init(&ce->ce_lock);
3465        spin_lock_init(&ar_pci->ps_lock);
3466
3467        timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
3468
3469        if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
3470                ath10k_pci_override_ce_config(ar);
3471
3472        ret = ath10k_pci_alloc_pipes(ar);
3473        if (ret) {
3474                ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3475                           ret);
3476                return ret;
3477        }
3478
3479        return 0;
3480}
3481
3482void ath10k_pci_release_resource(struct ath10k *ar)
3483{
3484        ath10k_pci_rx_retry_sync(ar);
3485        netif_napi_del(&ar->napi);
3486        ath10k_pci_ce_deinit(ar);
3487        ath10k_pci_free_pipes(ar);
3488}
3489
3490static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3491        .read32         = ath10k_bus_pci_read32,
3492        .write32        = ath10k_bus_pci_write32,
3493        .get_num_banks  = ath10k_pci_get_num_banks,
3494};
3495
3496static int ath10k_pci_probe(struct pci_dev *pdev,
3497                            const struct pci_device_id *pci_dev)
3498{
3499        int ret = 0;
3500        struct ath10k *ar;
3501        struct ath10k_pci *ar_pci;
3502        enum ath10k_hw_rev hw_rev;
3503        struct ath10k_bus_params bus_params;
3504        bool pci_ps;
3505        int (*pci_soft_reset)(struct ath10k *ar);
3506        int (*pci_hard_reset)(struct ath10k *ar);
3507        u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
3508
3509        switch (pci_dev->device) {
3510        case QCA988X_2_0_DEVICE_ID_UBNT:
3511        case QCA988X_2_0_DEVICE_ID:
3512                hw_rev = ATH10K_HW_QCA988X;
3513                pci_ps = false;
3514                pci_soft_reset = ath10k_pci_warm_reset;
3515                pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3516                targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3517                break;
3518        case QCA9887_1_0_DEVICE_ID:
3519                hw_rev = ATH10K_HW_QCA9887;
3520                pci_ps = false;
3521                pci_soft_reset = ath10k_pci_warm_reset;
3522                pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3523                targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3524                break;
3525        case QCA6164_2_1_DEVICE_ID:
3526        case QCA6174_2_1_DEVICE_ID:
3527                hw_rev = ATH10K_HW_QCA6174;
3528                pci_ps = true;
3529                pci_soft_reset = ath10k_pci_warm_reset;
3530                pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3531                targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3532                break;
3533        case QCA99X0_2_0_DEVICE_ID:
3534                hw_rev = ATH10K_HW_QCA99X0;
3535                pci_ps = false;
3536                pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3537                pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3538                targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3539                break;
3540        case QCA9984_1_0_DEVICE_ID:
3541                hw_rev = ATH10K_HW_QCA9984;
3542                pci_ps = false;
3543                pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3544                pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3545                targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3546                break;
3547        case QCA9888_2_0_DEVICE_ID:
3548                hw_rev = ATH10K_HW_QCA9888;
3549                pci_ps = false;
3550                pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3551                pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3552                targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3553                break;
3554        case QCA9377_1_0_DEVICE_ID:
3555                hw_rev = ATH10K_HW_QCA9377;
3556                pci_ps = true;
3557                pci_soft_reset = NULL;
3558                pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3559                targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3560                break;
3561        default:
3562                WARN_ON(1);
3563                return -EOPNOTSUPP;
3564        }
3565
3566        ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3567                                hw_rev, &ath10k_pci_hif_ops);
3568        if (!ar) {
3569                dev_err(&pdev->dev, "failed to allocate core\n");
3570                return -ENOMEM;
3571        }
3572
3573        ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3574                   pdev->vendor, pdev->device,
3575                   pdev->subsystem_vendor, pdev->subsystem_device);
3576
3577        ar_pci = ath10k_pci_priv(ar);
3578        ar_pci->pdev = pdev;
3579        ar_pci->dev = &pdev->dev;
3580        ar_pci->ar = ar;
3581        ar->dev_id = pci_dev->device;
3582        ar_pci->pci_ps = pci_ps;
3583        ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
3584        ar_pci->pci_soft_reset = pci_soft_reset;
3585        ar_pci->pci_hard_reset = pci_hard_reset;
3586        ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
3587        ar->ce_priv = &ar_pci->ce;
3588
3589        ar->id.vendor = pdev->vendor;
3590        ar->id.device = pdev->device;
3591        ar->id.subsystem_vendor = pdev->subsystem_vendor;
3592        ar->id.subsystem_device = pdev->subsystem_device;
3593
3594        timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
3595
3596        ret = ath10k_pci_setup_resource(ar);
3597        if (ret) {
3598                ath10k_err(ar, "failed to setup resource: %d\n", ret);
3599                goto err_core_destroy;
3600        }
3601
3602        ret = ath10k_pci_claim(ar);
3603        if (ret) {
3604                ath10k_err(ar, "failed to claim device: %d\n", ret);
3605                goto err_free_pipes;
3606        }
3607
3608        ret = ath10k_pci_force_wake(ar);
3609        if (ret) {
3610                ath10k_warn(ar, "failed to wake up device: %d\n", ret);
3611                goto err_sleep;
3612        }
3613
3614        ath10k_pci_ce_deinit(ar);
3615        ath10k_pci_irq_disable(ar);
3616
3617        ret = ath10k_pci_init_irq(ar);
3618        if (ret) {
3619                ath10k_err(ar, "failed to init irqs: %d\n", ret);
3620                goto err_sleep;
3621        }
3622
3623        ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3624                    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3625                    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3626
3627        ret = ath10k_pci_request_irq(ar);
3628        if (ret) {
3629                ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3630                goto err_deinit_irq;
3631        }
3632
3633        ret = ath10k_pci_chip_reset(ar);
3634        if (ret) {
3635                ath10k_err(ar, "failed to reset chip: %d\n", ret);
3636                goto err_free_irq;
3637        }
3638
3639        bus_params.dev_type = ATH10K_DEV_TYPE_LL;
3640        bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3641        if (bus_params.chip_id == 0xffffffff) {
3642                ath10k_err(ar, "failed to get chip id\n");
                ret = -ENODEV;
3643                goto err_free_irq;
3644        }
3645
3646        if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
3647                ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3648                           pdev->device, bus_params.chip_id);
                ret = -ENODEV;
3649                goto err_free_irq;
3650        }
3651
3652        ret = ath10k_core_register(ar, &bus_params);
3653        if (ret) {
3654                ath10k_err(ar, "failed to register driver core: %d\n", ret);
3655                goto err_free_irq;
3656        }
3657
3658        return 0;
3659
3660err_free_irq:
3661        ath10k_pci_free_irq(ar);
3662        ath10k_pci_rx_retry_sync(ar);
3663
3664err_deinit_irq:
3665        ath10k_pci_deinit_irq(ar);
3666
3667err_sleep:
3668        ath10k_pci_sleep_sync(ar);
3669        ath10k_pci_release(ar);
3670
3671err_free_pipes:
3672        ath10k_pci_free_pipes(ar);
3673
3674err_core_destroy:
3675        ath10k_core_destroy(ar);
3676
3677        return ret;
3678}
3679
3680static void ath10k_pci_remove(struct pci_dev *pdev)
3681{
3682        struct ath10k *ar = pci_get_drvdata(pdev);
3683        struct ath10k_pci *ar_pci;
3684
3685        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3686
3687        if (!ar)
3688                return;
3689
3690        ar_pci = ath10k_pci_priv(ar);
3691
3692        if (!ar_pci)
3693                return;
3694
3695        ath10k_core_unregister(ar);
3696        ath10k_pci_free_irq(ar);
3697        ath10k_pci_deinit_irq(ar);
3698        ath10k_pci_release_resource(ar);
3699        ath10k_pci_sleep_sync(ar);
3700        ath10k_pci_release(ar);
3701        ath10k_core_destroy(ar);
3702}
3703
3704MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3705
3706static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
3707{
3708        struct ath10k *ar = dev_get_drvdata(dev);
3709        int ret;
3710
3711        ret = ath10k_pci_suspend(ar);
3712        if (ret)
3713                ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
3714
3715        return ret;
3716}
3717
3718static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
3719{
3720        struct ath10k *ar = dev_get_drvdata(dev);
3721        int ret;
3722
3723        ret = ath10k_pci_resume(ar);
3724        if (ret)
3725                ath10k_warn(ar, "failed to resume hif: %d\n", ret);
3726
3727        return ret;
3728}
3729
3730static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
3731                         ath10k_pci_pm_suspend,
3732                         ath10k_pci_pm_resume);
3733
3734static struct pci_driver ath10k_pci_driver = {
3735        .name = "ath10k_pci",
3736        .id_table = ath10k_pci_id_table,
3737        .probe = ath10k_pci_probe,
3738        .remove = ath10k_pci_remove,
3739#ifdef CONFIG_PM
3740        .driver.pm = &ath10k_pci_pm_ops,
3741#endif
3742};
3743
3744static int __init ath10k_pci_init(void)
3745{
3746        int ret1, ret2;

3748        ret1 = pci_register_driver(&ath10k_pci_driver);
3749        if (ret1)
3750                printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3751                       ret1);

3753        ret2 = ath10k_ahb_init();
3754        if (ret2)
3755                printk(KERN_ERR "ahb init failed: %d\n", ret2);

        /* succeed if the driver registered on at least one bus; returning
         * an error here without unregistering would leave a stale driver
         * behind when only one of the two registrations failed
         */
        if (ret1 && ret2)
                return ret1;

3757        return 0;
3758}
3759module_init(ath10k_pci_init);
3760
3761static void __exit ath10k_pci_exit(void)
3762{
3763        pci_unregister_driver(&ath10k_pci_driver);
3764        ath10k_ahb_exit();
3765}
3766
3767module_exit(ath10k_pci_exit);
3768
3769MODULE_AUTHOR("Qualcomm Atheros");
3770MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
3771MODULE_LICENSE("Dual BSD/GPL");
3772
3773/* QCA988x 2.0 firmware files */
3774MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3775MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3776MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3777MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3778MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3779MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3780
3781/* QCA9887 1.0 firmware files */
3782MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3783MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3784MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3785
3786/* QCA6174 2.1 firmware files */
3787MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3788MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3789MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3790MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3791
3792/* QCA6174 3.1 firmware files */
3793MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3794MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3795MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3796MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
3797MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3798
3799/* QCA9377 1.0 firmware files */
3800MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3801MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3802MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
3803