linux/drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_AUTO = 0,
        ATH10K_PCI_IRQ_LEGACY = 1,
        ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
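
/*
 * Illustrative usage (sketch, not part of the driver logic): both
 * parameters can be given at module load time, e.g.
 *
 *   modprobe ath10k_pci irq_mode=2 reset_mode=0
 *
 * and, since they are registered with mode 0644, inspected or changed
 * later via /sys/module/ath10k_pci/parameters/.
 */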

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
        { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
        { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
        { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
        {0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
        /* QCA988X pre 2.0 chips are not supported because they need some nasty
         * hacks. ath10k doesn't have them and these devices crash horribly
         * because of that.
         */
        { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

        { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);

static struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htc_tx_cb,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath10k_pci_htt_htc_rx_cb,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 128,
                .recv_cb = ath10k_pci_htc_rx_cb,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htc_tx_cb,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htt_tx_cb,
        },

        /* CE5: target->host HTT (HIF->HTT) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
                .recv_cb = ath10k_pci_htt_rx_cb,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },

        /* CE8: target->host pktlog */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 128,
                .recv_cb = ath10k_pci_pktlog_rx_cb,
        },

        /* CE9: target autonomous qcache memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE10: target autonomous hif memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE11: target autonomous hif memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },
};
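
/*
 * Note (added for clarity): in the table above, src_nentries sizes the
 * host->target (send) ring and dest_nentries the target->host (receive)
 * ring of each Copy Engine. CE4 is created with CE_ATTR_DIS_INTR, so HTT
 * tx completions on it are not interrupt driven; they are polled from the
 * HTT rx callbacks further below.
 */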

/* Target firmware's Copy Engine configuration. */
static struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = __cpu_to_le32(0),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = __cpu_to_le32(1),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE2: target->host WMI */
        {
                .pipenum = __cpu_to_le32(2),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(64),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE3: host->target WMI */
        {
                .pipenum = __cpu_to_le32(3),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE4: host->target HTT */
        {
                .pipenum = __cpu_to_le32(4),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(256),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: target->host HTT (HIF->HTT) */
        {
                .pipenum = __cpu_to_le32(5),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(512),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = __cpu_to_le32(6),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(4096),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE7: used only by the host */
        {
                .pipenum = __cpu_to_le32(7),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(0),
                .nbytes_max = __cpu_to_le32(0),
                .flags = __cpu_to_le32(0),
                .reserved = __cpu_to_le32(0),
        },

        /* CE8: target->host pktlog */
        {
                .pipenum = __cpu_to_le32(8),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(64),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
                .reserved = __cpu_to_le32(0),
        },

        /* CE9: target autonomous qcache memcpy */
        {
                .pipenum = __cpu_to_le32(9),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
                .reserved = __cpu_to_le32(0),
        },

        /* It is not necessary to send the target wlan configuration for
         * CE10 & CE11, as these CEs are not actively used by the target.
         */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(4),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(5),
        },

        /* (Additions here) */

        { /* must be last */
                __cpu_to_le32(0),
                __cpu_to_le32(0),
                __cpu_to_le32(0),
        },
};
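
/*
 * Illustrative example (sketch, not driver code): resolving the WMI
 * control service against the table above with
 * ath10k_pci_hif_map_service_to_pipe() yields ul_pipe = 3 (CE3,
 * host->target) and dl_pipe = 2 (CE2, target->host):
 *
 *   u8 ul_pipe, dl_pipe;
 *   int ret;
 *
 *   ret = ath10k_pci_hif_map_service_to_pipe(ar,
 *                                            ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *                                            &ul_pipe, &dl_pipe);
 */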

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                           RTC_STATE_ADDRESS);

        return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        lockdep_assert_held(&ar_pci->ps_lock);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        iowrite32(PCIE_SOC_WAKE_V_MASK,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        lockdep_assert_held(&ar_pci->ps_lock);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
        ar_pci->ps_awake = false;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
        int tot_delay = 0;
        int curr_delay = 5;

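        /* Poll RTC_STATE with a simple backoff: busy-wait in steps that
         * start at 5 us and grow to at most 50 us per iteration, until the
         * device reports awake or PCIE_WAKE_TIMEOUT elapses.
         */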
        while (tot_delay < PCIE_WAKE_TIMEOUT) {
                if (ath10k_pci_is_awake(ar)) {
                        if (tot_delay > PCIE_WAKE_LATE_US)
                                ath10k_warn(ar, "device wakeup took %d ms which is unusually long; otherwise it works normally\n",
                                            tot_delay / 1000);
                        return 0;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }

        return -ETIMEDOUT;
}

static int ath10k_pci_force_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
        int ret = 0;

        if (ar_pci->pci_ps)
                return ret;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        if (!ar_pci->ps_awake) {
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);

                ret = ath10k_pci_wake_wait(ar);
                if (ret == 0)
                        ar_pci->ps_awake = true;
        }

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

        return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
        ar_pci->ps_awake = false;

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
        int ret = 0;

        if (ar_pci->pci_ps == 0)
                return ret;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        /* This function can be called very frequently. To avoid excessive
         * CPU stalls from MMIO reads, use a cached variable to hold the
         * device state.
         */
        if (!ar_pci->ps_awake) {
                __ath10k_pci_wake(ar);

                ret = ath10k_pci_wake_wait(ar);
                if (ret == 0)
                        ar_pci->ps_awake = true;
        }

        if (ret == 0) {
                ar_pci->ps_wake_refcount++;
                WARN_ON(ar_pci->ps_wake_refcount == 0);
        }

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

        return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        if (ar_pci->pci_ps == 0)
                return;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        if (WARN_ON(ar_pci->ps_wake_refcount == 0))
                goto skip;

        ar_pci->ps_wake_refcount--;

        mod_timer(&ar_pci->ps_timer, jiffies +
                  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(unsigned long ptr)
{
        struct ath10k *ar = (void *)ptr;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        if (ar_pci->ps_wake_refcount > 0)
                goto skip;

        __ath10k_pci_sleep(ar);

skip:
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        if (ar_pci->pci_ps == 0) {
                ath10k_pci_force_sleep(ar);
                return;
        }

        del_timer_sync(&ar_pci->ps_timer);

        spin_lock_irqsave(&ar_pci->ps_lock, flags);
        WARN_ON(ar_pci->ps_wake_refcount > 0);
        __ath10k_pci_sleep(ar);
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

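/*
 * Sketch of the typical access pattern (illustrative, not driver code):
 * every MMIO accessor below brackets the register access with a
 * wake/sleep pair, e.g.
 *
 *   ret = ath10k_pci_wake(ar);
 *   if (ret == 0) {
 *           val = ioread32(ar_pci->mem + offset);
 *           ath10k_pci_sleep(ar);
 *   }
 *
 * ath10k_pci_sleep() only drops the refcount and arms ps_timer; the
 * actual transition to sleep is deferred by
 * ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC so that bursts of register accesses
 * do not thrash the SoC power state.
 */
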
static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
                ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
                            offset, offset + sizeof(value), ar_pci->mem_len);
                return;
        }

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
                            value, offset, ret);
                return;
        }

        iowrite32(value, ar_pci->mem + offset);
        ath10k_pci_sleep(ar);
}

static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 val;
        int ret;

        if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
                ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
                            offset, offset + sizeof(val), ar_pci->mem_len);
                return 0;
        }

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
                            offset, ret);
                return 0xffffffff;
        }

        val = ioread32(ar_pci->mem + offset);
        ath10k_pci_sleep(ar);

        return val;
}

inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ar_pci->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ar_pci->bus_ops->read32(ar, offset);
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: the INTR_CLR register has to be set after INTR_ENABLE
         * is set to 0; otherwise the interrupt cannot really be cleared.
         */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer.
         */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer.
         */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs > 1)
                return "msi-x";

        if (ar_pci->num_msi_intrs == 1)
                return "msi";

        return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret;

        skb = dev_alloc_skb(pipe->buf_sz);
        if (!skb)
                return -ENOMEM;

        WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

        paddr = dma_map_single(ar->dev, skb->data,
                               skb->len + skb_tailroom(skb),
                               DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ar->dev, paddr))) {
                ath10k_warn(ar, "failed to dma map pci rx buf\n");
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        ATH10K_SKB_RXCB(skb)->paddr = paddr;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
        spin_unlock_bh(&ar_pci->ce_lock);
        if (ret) {
                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        int ret, num;

        if (pipe->buf_sz == 0)
                return;

        if (!ce_pipe->dest_ring)
                return;

        spin_lock_bh(&ar_pci->ce_lock);
        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
        spin_unlock_bh(&ar_pci->ce_lock);
        while (num--) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
                        if (ret == -ENOSPC)
                                break;
                        ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                        mod_timer(&ar_pci->rx_post_retry, jiffies +
                                  ATH10K_PCI_RX_POST_RETRY_MS);
                        break;
                }
        }
}

void ath10k_pci_rx_post(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < CE_COUNT; i++)
                ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
        struct ath10k *ar = (void *)ptr;

        ath10k_pci_rx_post(ar);
}

static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        u32 val = 0;

        switch (ar->hw_rev) {
        case ATH10K_HW_QCA988X:
        case ATH10K_HW_QCA6174:
        case ATH10K_HW_QCA9377:
                val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                          CORE_CTRL_ADDRESS) &
                       0x7ff) << 21;
                break;
        case ATH10K_HW_QCA99X0:
        case ATH10K_HW_QCA4019:
                val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
                break;
        }

        val |= 0x100000 | (addr & 0xfffff);
        return val;
}
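
/*
 * Worked example (added for clarity): on the QCA988X family the resulting
 * CE address is (CORE_CTRL & 0x7ff) << 21 | 0x100000 | (addr & 0xfffff).
 * With the CORE_CTRL window bits at 0, a target CPU address of 0x00094000
 * therefore maps to 0x100000 | 0x94000 = 0x194000.
 */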

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * The caller must guarantee proper alignment, where applicable, and that
 * there is only a single user at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        spin_lock_bh(&ar_pci->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send from Target(!) address to Host buffer.
                 *
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space.
                 */
                address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes,
                                            0, 0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag,
                                                            NULL) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0)
                memcpy(data, data_buf, orig_nbytes);
        else
                ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
                            address, ret);

        if (data_buf)
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
        __le32 val = 0;
        int ret;

        ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
        *value = __le32_to_cpu(val);

        return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
                                     u32 src, u32 len)
{
        u32 host_addr, addr;
        int ret;

        host_addr = host_interest_item_address(src);

        ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
        if (ret != 0) {
                ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
                            src, ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
        if (ret != 0) {
                ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
                            addr, len, ret);
                return ret;
        }

        return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)             \
        __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                              const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        spin_lock_bh(&ar_pci->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        memcpy(data_buf, data, orig_nbytes);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
                                            nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag,
                                                            NULL) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);
        }

        if (ret != 0)
                ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
                            address, ret);

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
        __le32 val = __cpu_to_le32(value);

        return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

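/*
 * Illustrative read-modify-write through the diagnostic window (sketch,
 * not driver code), using the accessors defined above:
 *
 *   u32 val;
 *   int ret;
 *
 *   ret = ath10k_pci_diag_read32(ar, address, &val);
 *   if (ret == 0)
 *           ret = ath10k_pci_diag_write32(ar, address, val | BIT(0));
 */
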
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct sk_buff_head list;
        struct sk_buff *skb;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (skb == NULL)
                        continue;

                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list)))
                ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
                                     void (*callback)(struct ath10k *ar,
                                                      struct sk_buff *skb))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes, max_nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
                           ce_state->id, skb->len);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);

                callback(ar, skb);
        }

        ath10k_pci_rx_post_pipe(pipe_info);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        /* CE4 polling needs to be done whenever the CE pipe which
         * transports HTT Rx (target->host) is processed.
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);

        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses a separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        ath10k_pci_process_rx_cb(ce_state,
                                 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct sk_buff *skb;

        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!skb)
                        continue;

                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len, DMA_TO_DEVICE);
                ath10k_htt_hif_tx_complete(ar, skb);
        }
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
        ath10k_htt_t2h_msg_handler(ar, skb);
}

/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        /* CE4 polling needs to be done whenever the CE pipe which
         * transports HTT Rx (target->host) is processed.
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);

        ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}

int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                         struct ath10k_hif_sg_item *items, int n_items)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int err, i = 0;

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) < n_items)) {
                err = -ENOBUFS;
                goto err;
        }

        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ar, ATH10K_DBG_PCI,
                           "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                           i, items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                                items[i].vaddr, items[i].len);

                err = ath10k_ce_send_nolock(ce_pipe,
                                            items[i].transfer_context,
                                            items[i].paddr,
                                            items[i].len,
                                            items[i].transfer_id,
                                            CE_SEND_FLAG_GATHER);
                if (err)
                        goto err;
        }

        /* `i` is equal to `n_items - 1` after the loop above */

        ath10k_dbg(ar, ATH10K_DBG_PCI,
                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                   i, items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                        items[i].vaddr, items[i].len);

        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    0);
        if (err)
                goto err;

        spin_unlock_bh(&ar_pci->ce_lock);
        return 0;

err:
        for (; i > 0; i--)
                __ath10k_ce_send_revert(ce_pipe);

        spin_unlock_bh(&ar_pci->ce_lock);
        return err;
}

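/* Note (added for clarity): a scatter-gather send above is a chain of CE
 * descriptors in which every item except the last is queued with
 * CE_SEND_FLAG_GATHER, while the final item is queued with flags == 0,
 * terminating the gather chain.
 */
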
1349int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1350                             size_t buf_len)
1351{
1352        return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1353}
1354
1355u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1356{
1357        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1358
1359        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1360
1361        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1362}
1363
1364static void ath10k_pci_dump_registers(struct ath10k *ar,
1365                                      struct ath10k_fw_crash_data *crash_data)
1366{
1367        __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1368        int i, ret;
1369
1370        lockdep_assert_held(&ar->data_lock);
1371
1372        ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1373                                      hi_failure_state,
1374                                      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1375        if (ret) {
1376                ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1377                return;
1378        }
1379
1380        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1381
1382        ath10k_err(ar, "firmware register dump:\n");
1383        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1384                ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1385                           i,
1386                           __le32_to_cpu(reg_dump_values[i]),
1387                           __le32_to_cpu(reg_dump_values[i + 1]),
1388                           __le32_to_cpu(reg_dump_values[i + 2]),
1389                           __le32_to_cpu(reg_dump_values[i + 3]));
1390
1391        if (!crash_data)
1392                return;
1393
1394        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1395                crash_data->registers[i] = reg_dump_values[i];
1396}
1397
1398static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1399{
1400        struct ath10k_fw_crash_data *crash_data;
1401        char uuid[50];
1402
1403        spin_lock_bh(&ar->data_lock);
1404
1405        ar->stats.fw_crash_counter++;
1406
1407        crash_data = ath10k_debug_get_new_fw_crash_data(ar);
1408
1409        if (crash_data)
1410                scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
1411        else
1412                scnprintf(uuid, sizeof(uuid), "n/a");
1413
1414        ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
1415        ath10k_print_driver_info(ar);
1416        ath10k_pci_dump_registers(ar, crash_data);
1417
1418        spin_unlock_bh(&ar->data_lock);
1419
1420        queue_work(ar->workqueue, &ar->restart_work);
1421}
1422
1423void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1424                                        int force)
1425{
1426        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1427
1428        if (!force) {
1429                int resources;
1430                /*
1431                 * Decide whether to actually poll for completions, or just
1432                 * wait for a later chance.
1433                 * If there seem to be plenty of resources left, then just wait
1434                 * since checking involves reading a CE register, which is a
1435                 * relatively expensive operation.
1436                 */
1437                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1438
1439                /*
1440                 * If at least 50% of the total resources are still available,
1441                 * don't bother checking again yet.
1442                 */
1443                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1444                        return;
1445        }
1446        ath10k_ce_per_engine_service(ar, pipe);
1447}
1448
1449void ath10k_pci_kill_tasklet(struct ath10k *ar)
1450{
1451        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1452        int i;
1453
1454        tasklet_kill(&ar_pci->intr_tq);
1455        tasklet_kill(&ar_pci->msi_fw_err);
1456
1457        for (i = 0; i < CE_COUNT; i++)
1458                tasklet_kill(&ar_pci->pipe_info[i].intr);
1459
1460        del_timer_sync(&ar_pci->rx_post_retry);
1461}
1462
1463int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1464                                       u8 *ul_pipe, u8 *dl_pipe)
1465{
1466        const struct service_to_pipe *entry;
1467        bool ul_set = false, dl_set = false;
1468        int i;
1469
1470        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1471
1472        for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1473                entry = &target_service_to_ce_map_wlan[i];
1474
1475                if (__le32_to_cpu(entry->service_id) != service_id)
1476                        continue;
1477
1478                switch (__le32_to_cpu(entry->pipedir)) {
1479                case PIPEDIR_NONE:
1480                        break;
1481                case PIPEDIR_IN:
1482                        WARN_ON(dl_set);
1483                        *dl_pipe = __le32_to_cpu(entry->pipenum);
1484                        dl_set = true;
1485                        break;
1486                case PIPEDIR_OUT:
1487                        WARN_ON(ul_set);
1488                        *ul_pipe = __le32_to_cpu(entry->pipenum);
1489                        ul_set = true;
1490                        break;
1491                case PIPEDIR_INOUT:
1492                        WARN_ON(dl_set);
1493                        WARN_ON(ul_set);
1494                        *dl_pipe = __le32_to_cpu(entry->pipenum);
1495                        *ul_pipe = __le32_to_cpu(entry->pipenum);
1496                        dl_set = true;
1497                        ul_set = true;
1498                        break;
1499                }
1500        }
1501
1502        if (WARN_ON(!ul_set || !dl_set))
1503                return -ENOENT;
1504
1505        return 0;
1506}
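
/* Usage sketch: callers resolve an HTC service to its copy engine pipe
 * pair, e.g. for the HTT data service (service IDs are defined in htc.h):
 *
 *      u8 ul_pipe, dl_pipe;
 *      int ret;
 *
 *      ret = ath10k_pci_hif_map_service_to_pipe(ar,
 *                                               ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
 *                                               &ul_pipe, &dl_pipe);
 *      if (ret)
 *              return ret;
 */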
1507
1508void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1509                                     u8 *ul_pipe, u8 *dl_pipe)
1510{
1511        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1512
1513        (void)ath10k_pci_hif_map_service_to_pipe(ar,
1514                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1515                                                 ul_pipe, dl_pipe);
1516}
1517
1518static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1519{
1520        u32 val;
1521
1522        switch (ar->hw_rev) {
1523        case ATH10K_HW_QCA988X:
1524        case ATH10K_HW_QCA6174:
1525        case ATH10K_HW_QCA9377:
1526                val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1527                                        CORE_CTRL_ADDRESS);
1528                val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1529                ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1530                                   CORE_CTRL_ADDRESS, val);
1531                break;
1532        case ATH10K_HW_QCA99X0:
1533        case ATH10K_HW_QCA4019:
1534        /* TODO: Find appropriate register configuration for QCA99X0
1535         * and QCA4019 to mask irq/MSI.
1536         */
1537                break;
1538        }
1539}
1540
1541static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1542{
1543        u32 val;
1544
1545        switch (ar->hw_rev) {
1546        case ATH10K_HW_QCA988X:
1547        case ATH10K_HW_QCA6174:
1548        case ATH10K_HW_QCA9377:
1549                val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1550                                        CORE_CTRL_ADDRESS);
1551                val |= CORE_CTRL_PCIE_REG_31_MASK;
1552                ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1553                                   CORE_CTRL_ADDRESS, val);
1554                break;
1555        case ATH10K_HW_QCA99X0:
1556        case ATH10K_HW_QCA4019:
1557        /* TODO: Find appropriate register configuration for QCA99X0
1558         * and QCA4019 to unmask irq/MSI.
1559         */
1560                break;
1561        }
1562}
1563
1564static void ath10k_pci_irq_disable(struct ath10k *ar)
1565{
1566        ath10k_ce_disable_interrupts(ar);
1567        ath10k_pci_disable_and_clear_legacy_irq(ar);
1568        ath10k_pci_irq_msi_fw_mask(ar);
1569}
1570
1571static void ath10k_pci_irq_sync(struct ath10k *ar)
1572{
1573        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1574        int i;
1575
1576        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
1577                synchronize_irq(ar_pci->pdev->irq + i);
1578}
1579
1580static void ath10k_pci_irq_enable(struct ath10k *ar)
1581{
1582        ath10k_ce_enable_interrupts(ar);
1583        ath10k_pci_enable_legacy_irq(ar);
1584        ath10k_pci_irq_msi_fw_unmask(ar);
1585}
1586
1587static int ath10k_pci_hif_start(struct ath10k *ar)
1588{
1589        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1590
1591        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1592
1593        ath10k_pci_irq_enable(ar);
1594        ath10k_pci_rx_post(ar);
1595
1596        pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1597                                   ar_pci->link_ctl);
1598
1599        return 0;
1600}
1601
1602static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1603{
1604        struct ath10k *ar;
1605        struct ath10k_ce_pipe *ce_pipe;
1606        struct ath10k_ce_ring *ce_ring;
1607        struct sk_buff *skb;
1608        int i;
1609
1610        ar = pci_pipe->hif_ce_state;
1611        ce_pipe = pci_pipe->ce_hdl;
1612        ce_ring = ce_pipe->dest_ring;
1613
1614        if (!ce_ring)
1615                return;
1616
1617        if (!pci_pipe->buf_sz)
1618                return;
1619
1620        for (i = 0; i < ce_ring->nentries; i++) {
1621                skb = ce_ring->per_transfer_context[i];
1622                if (!skb)
1623                        continue;
1624
1625                ce_ring->per_transfer_context[i] = NULL;
1626
1627                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1628                                 skb->len + skb_tailroom(skb),
1629                                 DMA_FROM_DEVICE);
1630                dev_kfree_skb_any(skb);
1631        }
1632}
1633
1634static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1635{
1636        struct ath10k *ar;
1637        struct ath10k_pci *ar_pci;
1638        struct ath10k_ce_pipe *ce_pipe;
1639        struct ath10k_ce_ring *ce_ring;
1640        struct sk_buff *skb;
1641        int i;
1642
1643        ar = pci_pipe->hif_ce_state;
1644        ar_pci = ath10k_pci_priv(ar);
1645        ce_pipe = pci_pipe->ce_hdl;
1646        ce_ring = ce_pipe->src_ring;
1647
1648        if (!ce_ring)
1649                return;
1650
1651        if (!pci_pipe->buf_sz)
1652                return;
1653
1654        for (i = 0; i < ce_ring->nentries; i++) {
1655                skb = ce_ring->per_transfer_context[i];
1656                if (!skb)
1657                        continue;
1658
1659                ce_ring->per_transfer_context[i] = NULL;
1660
1661                ath10k_htc_tx_completion_handler(ar, skb);
1662        }
1663}
1664
1665/*
1666 * Cleanup residual buffers for device shutdown:
1667 *    buffers that were enqueued for receive
1668 *    buffers that were to be sent
1669 * Note: buffers that have completed but have not yet
1670 * been processed are on a completion queue. They are
1671 * handled when the completion thread shuts down.
1672 */
1673static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1674{
1675        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1676        int pipe_num;
1677
1678        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1679                struct ath10k_pci_pipe *pipe_info;
1680
1681                pipe_info = &ar_pci->pipe_info[pipe_num];
1682                ath10k_pci_rx_pipe_cleanup(pipe_info);
1683                ath10k_pci_tx_pipe_cleanup(pipe_info);
1684        }
1685}
1686
1687void ath10k_pci_ce_deinit(struct ath10k *ar)
1688{
1689        int i;
1690
1691        for (i = 0; i < CE_COUNT; i++)
1692                ath10k_ce_deinit_pipe(ar, i);
1693}
1694
1695void ath10k_pci_flush(struct ath10k *ar)
1696{
1697        ath10k_pci_kill_tasklet(ar);
1698        ath10k_pci_buffer_cleanup(ar);
1699}
1700
1701static void ath10k_pci_hif_stop(struct ath10k *ar)
1702{
1703        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1704        unsigned long flags;
1705
1706        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
1707
1708        /* Most likely the device has an HTT Rx ring configured. The only
1709         * way to prevent the device from accessing (and possibly
1710         * corrupting) host memory is to reset the chip now.
1711         *
1712         * There's also no known way of masking MSI interrupts on the device.
1713         * For ranged MSI the CE-related interrupts can be masked. However,
1714         * regardless of how many MSI interrupts are assigned, the first one
1715         * is always used for firmware indications (crashes) and cannot be
1716         * masked. To prevent the device from asserting the interrupt, reset
1717         * it before proceeding with cleanup.
1718         */
1719        ath10k_pci_safe_chip_reset(ar);
1720
1721        ath10k_pci_irq_disable(ar);
1722        ath10k_pci_irq_sync(ar);
1723        ath10k_pci_flush(ar);
1724
1725        spin_lock_irqsave(&ar_pci->ps_lock, flags);
1726        WARN_ON(ar_pci->ps_wake_refcount > 0);
1727        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
1728}
1729
1730int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1731                                    void *req, u32 req_len,
1732                                    void *resp, u32 *resp_len)
1733{
1734        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1735        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1736        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1737        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1738        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1739        dma_addr_t req_paddr = 0;
1740        dma_addr_t resp_paddr = 0;
1741        struct bmi_xfer xfer = {};
1742        void *treq, *tresp = NULL;
1743        int ret = 0;
1744
1745        might_sleep();
1746
1747        if (resp && !resp_len)
1748                return -EINVAL;
1749
1750        if (resp && resp_len && *resp_len == 0)
1751                return -EINVAL;
1752
1753        treq = kmemdup(req, req_len, GFP_KERNEL);
1754        if (!treq)
1755                return -ENOMEM;
1756
1757        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1758        ret = dma_mapping_error(ar->dev, req_paddr);
1759        if (ret) {
1760                ret = -EIO;
1761                goto err_dma;
1762        }
1763
1764        if (resp && resp_len) {
1765                tresp = kzalloc(*resp_len, GFP_KERNEL);
1766                if (!tresp) {
1767                        ret = -ENOMEM;
1768                        goto err_req;
1769                }
1770
1771                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1772                                            DMA_FROM_DEVICE);
1773                ret = dma_mapping_error(ar->dev, resp_paddr);
1774                if (ret) {
1775                        ret = -EIO;
1776                        goto err_req;
1777                }
1778
1779                xfer.wait_for_resp = true;
1780                xfer.resp_len = 0;
1781
1782                ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
1783        }
1784
1785        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1786        if (ret)
1787                goto err_resp;
1788
1789        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1790        if (ret) {
1791                u32 unused_buffer;
1792                unsigned int unused_nbytes;
1793                unsigned int unused_id;
1794
1795                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1796                                           &unused_nbytes, &unused_id);
1797        } else {
1798                /* the transfer completed before the timeout */
1799                ret = 0;
1800        }
1801
1802err_resp:
1803        if (resp) {
1804                u32 unused_buffer;
1805
1806                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1807                dma_unmap_single(ar->dev, resp_paddr,
1808                                 *resp_len, DMA_FROM_DEVICE);
1809        }
1810err_req:
1811        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1812
1813        if (ret == 0 && resp_len) {
1814                *resp_len = min(*resp_len, xfer.resp_len);
1815                memcpy(resp, tresp, *resp_len);
1816        }
1817err_dma:
1818        kfree(treq);
1819        kfree(tresp);
1820
1821        return ret;
1822}
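
/* A minimal usage sketch of the BMI exchange above (hypothetical wrapper,
 * assuming the bmi_cmd/bmi_resp layouts from bmi.h; the in-tree caller is
 * ath10k_bmi_read_soc_reg() in bmi.c, which goes through the generic HIF
 * wrapper instead of calling this function directly):
 */
static int ath10k_pci_bmi_read_soc_reg_sketch(struct ath10k *ar, u32 address,
                                              u32 *value)
{
        struct bmi_cmd cmd;
        union bmi_resp resp;
        u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
        u32 resplen = sizeof(resp.read_soc_reg);
        int ret;

        cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
        cmd.read_soc_reg.addr = __cpu_to_le32(address);

        /* round-trip: request goes out on the TX CE, response is polled
         * for on the RX CE by ath10k_pci_bmi_wait() */
        ret = ath10k_pci_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
                                              &resp, &resplen);
        if (ret)
                return ret;

        *value = __le32_to_cpu(resp.read_soc_reg.value);
        return 0;
}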
1823
1824static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1825{
1826        struct bmi_xfer *xfer;
1827
1828        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
1829                return;
1830
1831        xfer->tx_done = true;
1832}
1833
1834static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1835{
1836        struct ath10k *ar = ce_state->ar;
1837        struct bmi_xfer *xfer;
1838        u32 ce_data;
1839        unsigned int nbytes;
1840        unsigned int transfer_id;
1841        unsigned int flags;
1842
1843        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1844                                          &nbytes, &transfer_id, &flags))
1845                return;
1846
1847        if (WARN_ON_ONCE(!xfer))
1848                return;
1849
1850        if (!xfer->wait_for_resp) {
1851                ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
1852                return;
1853        }
1854
1855        xfer->resp_len = nbytes;
1856        xfer->rx_done = true;
1857}
1858
1859static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1860                               struct ath10k_ce_pipe *rx_pipe,
1861                               struct bmi_xfer *xfer)
1862{
1863        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1864
1865        while (time_before_eq(jiffies, timeout)) {
1866                ath10k_pci_bmi_send_done(tx_pipe);
1867                ath10k_pci_bmi_recv_data(rx_pipe);
1868
1869                if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1870                        return 0;
1871
1872                schedule();
1873        }
1874
1875        return -ETIMEDOUT;
1876}
1877
1878/*
1879 * Send an interrupt to the device to wake up the Target CPU
1880 * so it has an opportunity to notice any changed state.
1881 */
1882static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1883{
1884        u32 addr, val;
1885
1886        addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1887        val = ath10k_pci_read32(ar, addr);
1888        val |= CORE_CTRL_CPU_INTR_MASK;
1889        ath10k_pci_write32(ar, addr, val);
1890
1891        return 0;
1892}
1893
1894static int ath10k_pci_get_num_banks(struct ath10k *ar)
1895{
1896        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1897
1898        switch (ar_pci->pdev->device) {
1899        case QCA988X_2_0_DEVICE_ID:
1900        case QCA99X0_2_0_DEVICE_ID:
1901                return 1;
1902        case QCA6164_2_1_DEVICE_ID:
1903        case QCA6174_2_1_DEVICE_ID:
1904                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1905                case QCA6174_HW_1_0_CHIP_ID_REV:
1906                case QCA6174_HW_1_1_CHIP_ID_REV:
1907                case QCA6174_HW_2_1_CHIP_ID_REV:
1908                case QCA6174_HW_2_2_CHIP_ID_REV:
1909                        return 3;
1910                case QCA6174_HW_1_3_CHIP_ID_REV:
1911                        return 2;
1912                case QCA6174_HW_3_0_CHIP_ID_REV:
1913                case QCA6174_HW_3_1_CHIP_ID_REV:
1914                case QCA6174_HW_3_2_CHIP_ID_REV:
1915                        return 9;
1916                }
1917                break;
1918        case QCA9377_1_0_DEVICE_ID:
1919                return 2;
1920        }
1921
1922        ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1923        return 1;
1924}
1925
1926static int ath10k_bus_get_num_banks(struct ath10k *ar)
1927{
1928        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1929
1930        return ar_pci->bus_ops->get_num_banks(ar);
1931}
1932
1933int ath10k_pci_init_config(struct ath10k *ar)
1934{
1935        u32 interconnect_targ_addr;
1936        u32 pcie_state_targ_addr = 0;
1937        u32 pipe_cfg_targ_addr = 0;
1938        u32 svc_to_pipe_map = 0;
1939        u32 pcie_config_flags = 0;
1940        u32 ealloc_value;
1941        u32 ealloc_targ_addr;
1942        u32 flag2_value;
1943        u32 flag2_targ_addr;
1944        int ret = 0;
1945
1946        /* Download to Target the CE Config and the service-to-CE map */
1947        interconnect_targ_addr =
1948                host_interest_item_address(HI_ITEM(hi_interconnect_state));
1949
1950        /* Supply Target-side CE configuration */
1951        ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
1952                                     &pcie_state_targ_addr);
1953        if (ret != 0) {
1954                ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
1955                return ret;
1956        }
1957
1958        if (pcie_state_targ_addr == 0) {
1959                ret = -EIO;
1960                ath10k_err(ar, "Invalid pcie state addr\n");
1961                return ret;
1962        }
1963
1964        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1965                                          offsetof(struct pcie_state,
1966                                                   pipe_cfg_addr)),
1967                                     &pipe_cfg_targ_addr);
1968        if (ret != 0) {
1969                ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
1970                return ret;
1971        }
1972
1973        if (pipe_cfg_targ_addr == 0) {
1974                ret = -EIO;
1975                ath10k_err(ar, "Invalid pipe cfg addr\n");
1976                return ret;
1977        }
1978
1979        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1980                                        target_ce_config_wlan,
1981                                        sizeof(struct ce_pipe_config) *
1982                                        NUM_TARGET_CE_CONFIG_WLAN);
1983
1984        if (ret != 0) {
1985                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
1986                return ret;
1987        }
1988
1989        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1990                                          offsetof(struct pcie_state,
1991                                                   svc_to_pipe_map)),
1992                                     &svc_to_pipe_map);
1993        if (ret != 0) {
1994                ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
1995                return ret;
1996        }
1997
1998        if (svc_to_pipe_map == 0) {
1999                ret = -EIO;
2000                ath10k_err(ar, "Invalid svc_to_pipe map\n");
2001                return ret;
2002        }
2003
2004        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2005                                        target_service_to_ce_map_wlan,
2006                                        sizeof(target_service_to_ce_map_wlan));
2007        if (ret != 0) {
2008                ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2009                return ret;
2010        }
2011
2012        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2013                                          offsetof(struct pcie_state,
2014                                                   config_flags)),
2015                                     &pcie_config_flags);
2016        if (ret != 0) {
2017                ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2018                return ret;
2019        }
2020
2021        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2022
2023        ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2024                                           offsetof(struct pcie_state,
2025                                                    config_flags)),
2026                                      pcie_config_flags);
2027        if (ret != 0) {
2028                ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2029                return ret;
2030        }
2031
2032        /* configure early allocation */
2033        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2034
2035        ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2036        if (ret != 0) {
2037                ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
2038                return ret;
2039        }
2040
2041        /* first bank is switched to IRAM */
2042        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2043                         HI_EARLY_ALLOC_MAGIC_MASK);
2044        ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2045                          HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2046                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2047
2048        ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2049        if (ret != 0) {
2050                ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2051                return ret;
2052        }
2053
2054        /* Tell Target to proceed with initialization */
2055        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2056
2057        ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2058        if (ret != 0) {
2059                ath10k_err(ar, "Failed to get option val: %d\n", ret);
2060                return ret;
2061        }
2062
2063        flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2064
2065        ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2066        if (ret != 0) {
2067                ath10k_err(ar, "Failed to set option val: %d\n", ret);
2068                return ret;
2069        }
2070
2071        return 0;
2072}
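
/* A minimal sketch (hypothetical helper, not in the driver): the
 * read-modify-write pattern used above for the ealloc and flag2 values
 * could be factored out on top of the same diag accessors:
 */
static int ath10k_pci_diag_set_bits_sketch(struct ath10k *ar, u32 addr,
                                           u32 bits)
{
        u32 val;
        int ret;

        ret = ath10k_pci_diag_read32(ar, addr, &val);
        if (ret)
                return ret;

        return ath10k_pci_diag_write32(ar, addr, val | bits);
}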
2073
2074static void ath10k_pci_override_ce_config(struct ath10k *ar)
2075{
2076        struct ce_attr *attr;
2077        struct ce_pipe_config *config;
2078
2079        /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2080         * since it is currently used by another feature.
2081         */
2082
2083        /* Override Host's Copy Engine 5 configuration */
2084        attr = &host_ce_config_wlan[5];
2085        attr->src_sz_max = 0;
2086        attr->dest_nentries = 0;
2087
2088        /* Override Target firmware's Copy Engine configuration */
2089        config = &target_ce_config_wlan[5];
2090        config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2091        config->nbytes_max = __cpu_to_le32(2048);
2092
2093        /* Map from service/endpoint to Copy Engine */
2094        target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2095}
2096
2097int ath10k_pci_alloc_pipes(struct ath10k *ar)
2098{
2099        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2100        struct ath10k_pci_pipe *pipe;
2101        int i, ret;
2102
2103        for (i = 0; i < CE_COUNT; i++) {
2104                pipe = &ar_pci->pipe_info[i];
2105                pipe->ce_hdl = &ar_pci->ce_states[i];
2106                pipe->pipe_num = i;
2107                pipe->hif_ce_state = ar;
2108
2109                ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
2110                if (ret) {
2111                        ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2112                                   i, ret);
2113                        return ret;
2114                }
2115
2116                /* Last CE is Diagnostic Window */
2117                if (i == CE_DIAG_PIPE) {
2118                        ar_pci->ce_diag = pipe->ce_hdl;
2119                        continue;
2120                }
2121
2122                pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
2123        }
2124
2125        return 0;
2126}
2127
2128void ath10k_pci_free_pipes(struct ath10k *ar)
2129{
2130        int i;
2131
2132        for (i = 0; i < CE_COUNT; i++)
2133                ath10k_ce_free_pipe(ar, i);
2134}
2135
2136int ath10k_pci_init_pipes(struct ath10k *ar)
2137{
2138        int i, ret;
2139
2140        for (i = 0; i < CE_COUNT; i++) {
2141                ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
2142                if (ret) {
2143                        ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2144                                   i, ret);
2145                        return ret;
2146                }
2147        }
2148
2149        return 0;
2150}
2151
2152static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2153{
2154        return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2155               FW_IND_EVENT_PENDING;
2156}
2157
2158static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2159{
2160        u32 val;
2161
2162        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2163        val &= ~FW_IND_EVENT_PENDING;
2164        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2165}
2166
2167/* this function effectively clears the target memory controller assert line */
2168static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2169{
2170        u32 val;
2171
2172        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2173        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2174                               val | SOC_RESET_CONTROL_SI0_RST_MASK);
2175        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2176
2177        msleep(10);
2178
2179        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2180        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2181                               val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2182        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2183
2184        msleep(10);
2185}
2186
2187static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2188{
2189        u32 val;
2190
2191        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2192
2193        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2194                                SOC_RESET_CONTROL_ADDRESS);
2195        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2196                           val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2197}
2198
2199static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2200{
2201        u32 val;
2202
2203        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2204                                SOC_RESET_CONTROL_ADDRESS);
2205
2206        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2207                           val | SOC_RESET_CONTROL_CE_RST_MASK);
2208        msleep(10);
2209        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2210                           val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2211}
2212
2213static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2214{
2215        u32 val;
2216
2217        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2218                                SOC_LF_TIMER_CONTROL0_ADDRESS);
2219        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2220                           SOC_LF_TIMER_CONTROL0_ADDRESS,
2221                           val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2222}
2223
2224static int ath10k_pci_warm_reset(struct ath10k *ar)
2225{
2226        int ret;
2227
2228        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2229
2230        spin_lock_bh(&ar->data_lock);
2231        ar->stats.fw_warm_reset_counter++;
2232        spin_unlock_bh(&ar->data_lock);
2233
2234        ath10k_pci_irq_disable(ar);
2235
2236        /* Make sure the target CPU is not doing anything dangerous; e.g. if
2237         * it were to access the copy engine while the host performs a copy
2238         * engine reset, it could confuse the PCIe controller to the point
2239         * of bringing the host system to a complete stop (i.e. hang).
2240         */
2241        ath10k_pci_warm_reset_si0(ar);
2242        ath10k_pci_warm_reset_cpu(ar);
2243        ath10k_pci_init_pipes(ar);
2244        ath10k_pci_wait_for_target_init(ar);
2245
2246        ath10k_pci_warm_reset_clear_lf(ar);
2247        ath10k_pci_warm_reset_ce(ar);
2248        ath10k_pci_warm_reset_cpu(ar);
2249        ath10k_pci_init_pipes(ar);
2250
2251        ret = ath10k_pci_wait_for_target_init(ar);
2252        if (ret) {
2253                ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2254                return ret;
2255        }
2256
2257        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2258
2259        return 0;
2260}
2261
2262static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2263{
2264        if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
2265                return ath10k_pci_warm_reset(ar);
2266        } else if (QCA_REV_99X0(ar)) {
2267                ath10k_pci_irq_disable(ar);
2268                return ath10k_pci_qca99x0_chip_reset(ar);
2269        } else {
2270                return -ENOTSUPP;
2271        }
2272}
2273
2274static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2275{
2276        int i, ret;
2277        u32 val;
2278
2279        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2280
2281        /* Some hardware revisions (e.g. CUS223v2) have issues with cold
2282         * reset. It is thus preferred to use warm reset, which is safer but
2283         * may not be able to recover the device from all failure scenarios.
2284         *
2285         * Warm reset doesn't always work on first try so attempt it a few
2286         * times before giving up.
2287         */
2288        for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2289                ret = ath10k_pci_warm_reset(ar);
2290                if (ret) {
2291                        ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2292                                    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2293                                    ret);
2294                        continue;
2295                }
2296
2297                /* FIXME: Sometimes copy engine doesn't recover after warm
2298                 * reset. In most cases this needs cold reset. In some of these
2299                 * cases the device is in such a state that a cold reset may
2300                 * lock up the host.
2301                 *
2302                 * Reading any host interest register via copy engine is
2303                 * sufficient to verify if device is capable of booting
2304                 * firmware blob.
2305                 */
2306                ret = ath10k_pci_init_pipes(ar);
2307                if (ret) {
2308                        ath10k_warn(ar, "failed to init copy engine: %d\n",
2309                                    ret);
2310                        continue;
2311                }
2312
2313                ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2314                                             &val);
2315                if (ret) {
2316                        ath10k_warn(ar, "failed to poke copy engine: %d\n",
2317                                    ret);
2318                        continue;
2319                }
2320
2321                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2322                return 0;
2323        }
2324
2325        if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2326                ath10k_warn(ar, "refusing cold reset as requested\n");
2327                return -EPERM;
2328        }
2329
2330        ret = ath10k_pci_cold_reset(ar);
2331        if (ret) {
2332                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2333                return ret;
2334        }
2335
2336        ret = ath10k_pci_wait_for_target_init(ar);
2337        if (ret) {
2338                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2339                            ret);
2340                return ret;
2341        }
2342
2343        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2344
2345        return 0;
2346}
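
/* Usage note: the cold reset fallback above can be disabled from userspace
 * via the reset_mode module parameter declared at the top of this file,
 * e.g.:
 *
 *      modprobe ath10k_pci reset_mode=1
 *
 * in which case this function returns -EPERM once the warm reset attempts
 * are exhausted.
 */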
2347
2348static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2349{
2350        int ret;
2351
2352        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2353
2354        /* FIXME: QCA6174 requires cold + warm reset to work. */
2355
2356        ret = ath10k_pci_cold_reset(ar);
2357        if (ret) {
2358                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2359                return ret;
2360        }
2361
2362        ret = ath10k_pci_wait_for_target_init(ar);
2363        if (ret) {
2364                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2365                            ret);
2366                return ret;
2367        }
2368
2369        ret = ath10k_pci_warm_reset(ar);
2370        if (ret) {
2371                ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2372                return ret;
2373        }
2374
2375        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2376
2377        return 0;
2378}
2379
2380static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2381{
2382        int ret;
2383
2384        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2385
2386        ret = ath10k_pci_cold_reset(ar);
2387        if (ret) {
2388                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2389                return ret;
2390        }
2391
2392        ret = ath10k_pci_wait_for_target_init(ar);
2393        if (ret) {
2394                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2395                            ret);
2396                return ret;
2397        }
2398
2399        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2400
2401        return 0;
2402}
2403
2404static int ath10k_pci_chip_reset(struct ath10k *ar)
2405{
2406        if (QCA_REV_988X(ar))
2407                return ath10k_pci_qca988x_chip_reset(ar);
2408        else if (QCA_REV_6174(ar))
2409                return ath10k_pci_qca6174_chip_reset(ar);
2410        else if (QCA_REV_9377(ar))
2411                return ath10k_pci_qca6174_chip_reset(ar);
2412        else if (QCA_REV_99X0(ar))
2413                return ath10k_pci_qca99x0_chip_reset(ar);
2414        else
2415                return -ENOTSUPP;
2416}
2417
2418static int ath10k_pci_hif_power_up(struct ath10k *ar)
2419{
2420        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2421        int ret;
2422
2423        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2424
2425        pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2426                                  &ar_pci->link_ctl);
2427        pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2428                                   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2429
2430        /*
2431         * Bring the target up cleanly.
2432         *
2433         * The target may be in an undefined state with an AUX-powered Target
2434         * and a Host in WoW mode. If the Host crashes, loses power, or is
2435         * restarted (without unloading the driver) then the Target is left
2436         * (aux) powered and running. On a subsequent driver load, the Target
2437         * is in an unexpected state. We try to catch that here in order to
2438         * reset the Target and retry the probe.
2439         */
2440        ret = ath10k_pci_chip_reset(ar);
2441        if (ret) {
2442                if (ath10k_pci_has_fw_crashed(ar)) {
2443                        ath10k_warn(ar, "firmware crashed during chip reset\n");
2444                        ath10k_pci_fw_crashed_clear(ar);
2445                        ath10k_pci_fw_crashed_dump(ar);
2446                }
2447
2448                ath10k_err(ar, "failed to reset chip: %d\n", ret);
2449                goto err_sleep;
2450        }
2451
2452        ret = ath10k_pci_init_pipes(ar);
2453        if (ret) {
2454                ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2455                goto err_sleep;
2456        }
2457
2458        ret = ath10k_pci_init_config(ar);
2459        if (ret) {
2460                ath10k_err(ar, "failed to setup init config: %d\n", ret);
2461                goto err_ce;
2462        }
2463
2464        ret = ath10k_pci_wake_target_cpu(ar);
2465        if (ret) {
2466                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2467                goto err_ce;
2468        }
2469
2470        return 0;
2471
2472err_ce:
2473        ath10k_pci_ce_deinit(ar);
2474
2475err_sleep:
2476        return ret;
2477}
2478
2479void ath10k_pci_hif_power_down(struct ath10k *ar)
2480{
2481        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2482
2483        /* Currently hif_power_up performs effectively a reset and hif_stop
2484         * resets the chip as well so there's no point in resetting here.
2485         */
2486}
2487
2488#ifdef CONFIG_PM
2489
2490static int ath10k_pci_hif_suspend(struct ath10k *ar)
2491{
2492        /* The grace timer can still be counting down and ar->ps_awake may
2493         * still be true. It is known that the device can be asleep after
2494         * resuming regardless of the SoC powersave state before suspending.
2495         * Hence make sure the device is asleep before proceeding.
2496         */
2497        ath10k_pci_sleep_sync(ar);
2498
2499        return 0;
2500}
2501
2502static int ath10k_pci_hif_resume(struct ath10k *ar)
2503{
2504        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2505        struct pci_dev *pdev = ar_pci->pdev;
2506        u32 val;
2507        int ret = 0;
2508
2509        ret = ath10k_pci_force_wake(ar);
2510        if (ret) {
2511                ath10k_err(ar, "failed to wake up target: %d\n", ret);
2512                return ret;
2513        }
2514
2515        /* Suspend/Resume resets the PCI configuration space, so we have to
2516         * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2517         * from interfering with C3 CPU state. pci_restore_state() won't help
2518         * here since it only restores the first 64 bytes of the PCI config header.
2519         */
2520        pci_read_config_dword(pdev, 0x40, &val);
2521        if ((val & 0x0000ff00) != 0)
2522                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2523
2524        return ret;
2525}
2526#endif
2527
2528static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2529        .tx_sg                  = ath10k_pci_hif_tx_sg,
2530        .diag_read              = ath10k_pci_hif_diag_read,
2531        .diag_write             = ath10k_pci_diag_write_mem,
2532        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
2533        .start                  = ath10k_pci_hif_start,
2534        .stop                   = ath10k_pci_hif_stop,
2535        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
2536        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
2537        .send_complete_check    = ath10k_pci_hif_send_complete_check,
2538        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
2539        .power_up               = ath10k_pci_hif_power_up,
2540        .power_down             = ath10k_pci_hif_power_down,
2541        .read32                 = ath10k_pci_read32,
2542        .write32                = ath10k_pci_write32,
2543#ifdef CONFIG_PM
2544        .suspend                = ath10k_pci_hif_suspend,
2545        .resume                 = ath10k_pci_hif_resume,
2546#endif
2547};
2548
2549static void ath10k_pci_ce_tasklet(unsigned long ptr)
2550{
2551        struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2552        struct ath10k_pci *ar_pci = pipe->ar_pci;
2553
2554        ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2555}
2556
2557static void ath10k_msi_err_tasklet(unsigned long data)
2558{
2559        struct ath10k *ar = (struct ath10k *)data;
2560
2561        if (!ath10k_pci_has_fw_crashed(ar)) {
2562                ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
2563                return;
2564        }
2565
2566        ath10k_pci_irq_disable(ar);
2567        ath10k_pci_fw_crashed_clear(ar);
2568        ath10k_pci_fw_crashed_dump(ar);
2569}
2570
2571/*
2572 * Handler for a per-engine interrupt on a PARTICULAR CE.
2573 * This is used in cases where each CE has a private MSI interrupt.
2574 */
2575static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2576{
2577        struct ath10k *ar = arg;
2578        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2579        int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2580
2581        if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2582                ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
2583                            ce_id);
2584                return IRQ_HANDLED;
2585        }
2586
2587        /*
2588         * NOTE: We are able to derive ce_id from irq because we
2589         * use a one-to-one mapping for CE's 0..5.
2590         * CE's 6 & 7 do not use interrupts at all.
2591         *
2592         * This mapping must be kept in sync with the mapping
2593         * used by firmware.
2594         */
2595        tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2596        return IRQ_HANDLED;
2597}
2598
2599static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2600{
2601        struct ath10k *ar = arg;
2602        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2603
2604        tasklet_schedule(&ar_pci->msi_fw_err);
2605        return IRQ_HANDLED;
2606}
2607
2608/*
2609 * Top-level interrupt handler for all PCI interrupts from a Target.
2610 * When a block of MSI interrupts is allocated, this top-level handler
2611 * is not used; instead, we directly call the correct sub-handler.
2612 */
2613static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2614{
2615        struct ath10k *ar = arg;
2616        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2617        int ret;
2618
2619        ret = ath10k_pci_force_wake(ar);
2620        if (ret) {
2621                ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
2622                return IRQ_NONE;
2623        }
2624
2625        if (ar_pci->num_msi_intrs == 0) {
2626                if (!ath10k_pci_irq_pending(ar))
2627                        return IRQ_NONE;
2628
2629                ath10k_pci_disable_and_clear_legacy_irq(ar);
2630        }
2631
2632        tasklet_schedule(&ar_pci->intr_tq);
2633
2634        return IRQ_HANDLED;
2635}
2636
2637static void ath10k_pci_tasklet(unsigned long data)
2638{
2639        struct ath10k *ar = (struct ath10k *)data;
2640        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2641
2642        if (ath10k_pci_has_fw_crashed(ar)) {
2643                ath10k_pci_irq_disable(ar);
2644                ath10k_pci_fw_crashed_clear(ar);
2645                ath10k_pci_fw_crashed_dump(ar);
2646                return;
2647        }
2648
2649        ath10k_ce_per_engine_service_any(ar);
2650
2651        /* Re-enable legacy irq that was disabled in the irq handler */
2652        if (ar_pci->num_msi_intrs == 0)
2653                ath10k_pci_enable_legacy_irq(ar);
2654}
2655
2656static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2657{
2658        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2659        int ret, i;
2660
2661        ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2662                          ath10k_pci_msi_fw_handler,
2663                          IRQF_SHARED, "ath10k_pci", ar);
2664        if (ret) {
2665                ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
2666                            ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2667                return ret;
2668        }
2669
2670        for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2671                ret = request_irq(ar_pci->pdev->irq + i,
2672                                  ath10k_pci_per_engine_handler,
2673                                  IRQF_SHARED, "ath10k_pci", ar);
2674                if (ret) {
2675                        ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
2676                                    ar_pci->pdev->irq + i, ret);
2677
2678                        for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2679                                free_irq(ar_pci->pdev->irq + i, ar);
2680
2681                        free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2682                        return ret;
2683                }
2684        }
2685
2686        return 0;
2687}
2688
2689static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2690{
2691        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2692        int ret;
2693
2694        ret = request_irq(ar_pci->pdev->irq,
2695                          ath10k_pci_interrupt_handler,
2696                          IRQF_SHARED, "ath10k_pci", ar);
2697        if (ret) {
2698                ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2699                            ar_pci->pdev->irq, ret);
2700                return ret;
2701        }
2702
2703        return 0;
2704}
2705
2706static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2707{
2708        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2709        int ret;
2710
2711        ret = request_irq(ar_pci->pdev->irq,
2712                          ath10k_pci_interrupt_handler,
2713                          IRQF_SHARED, "ath10k_pci", ar);
2714        if (ret) {
2715                ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2716                            ar_pci->pdev->irq, ret);
2717                return ret;
2718        }
2719
2720        return 0;
2721}
2722
2723static int ath10k_pci_request_irq(struct ath10k *ar)
2724{
2725        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2726
2727        switch (ar_pci->num_msi_intrs) {
2728        case 0:
2729                return ath10k_pci_request_irq_legacy(ar);
2730        case 1:
2731                return ath10k_pci_request_irq_msi(ar);
2732        default:
2733                return ath10k_pci_request_irq_msix(ar);
2734        }
2735}
2736
2737static void ath10k_pci_free_irq(struct ath10k *ar)
2738{
2739        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2740        int i;
2741
2742        /* There's at least one interrupt regardless of whether it's legacy
2743         * INTR, MSI or MSI-X */
2744        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2745                free_irq(ar_pci->pdev->irq + i, ar);
2746}
2747
2748void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2749{
2750        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2751        int i;
2752
2753        tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2754        tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2755                     (unsigned long)ar);
2756
2757        for (i = 0; i < CE_COUNT; i++) {
2758                ar_pci->pipe_info[i].ar_pci = ar_pci;
2759                tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2760                             (unsigned long)&ar_pci->pipe_info[i]);
2761        }
2762}
2763
2764static int ath10k_pci_init_irq(struct ath10k *ar)
2765{
2766        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2767        int ret;
2768
2769        ath10k_pci_init_irq_tasklets(ar);
2770
2771        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2772                ath10k_info(ar, "limiting irq mode to: %d\n",
2773                            ath10k_pci_irq_mode);
2774
2775        /* Try MSI-X */
2776        if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
2777                ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
2778                ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2779                                           ar_pci->num_msi_intrs);
2780                if (ret > 0)
2781                        return 0;
2782
2783                /* fall-through */
2784        }
2785
2786        /* Try MSI */
2787        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2788                ar_pci->num_msi_intrs = 1;
2789                ret = pci_enable_msi(ar_pci->pdev);
2790                if (ret == 0)
2791                        return 0;
2792
2793                /* fall-through */
2794        }
2795
2796        /* Try legacy irq
2797         *
2798         * A potential race occurs here: the CORE_BASE write
2799         * depends on the target correctly decoding the AXI address, but
2800         * the host won't know when the target writes its BAR to CORE_CTRL.
2801         * This write might get lost if the target has NOT written the BAR.
2802         * For now, work around the race by repeating the write in the
2803         * synchronization check below. */
2804        ar_pci->num_msi_intrs = 0;
2805
2806        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2807                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2808
2809        return 0;
2810}
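
/* Note: on kernels with the modern PCI IRQ API the MSI-X -> MSI -> legacy
 * fallback above could be collapsed into a single call; a sketch, assuming
 * the same vector range as requested here:
 *
 *      ret = pci_alloc_irq_vectors(ar_pci->pdev, 1, MSI_ASSIGN_CE_MAX + 1,
 *                                  PCI_IRQ_MSIX | PCI_IRQ_MSI |
 *                                  PCI_IRQ_LEGACY);
 *      if (ret < 0)
 *              return ret;
 */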
2811
2812static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2813{
2814        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2815                           0);
2816}
2817
2818static int ath10k_pci_deinit_irq(struct ath10k *ar)
2819{
2820        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2821
2822        switch (ar_pci->num_msi_intrs) {
2823        case 0:
2824                ath10k_pci_deinit_irq_legacy(ar);
2825                break;
2826        default:
2827                pci_disable_msi(ar_pci->pdev);
2828                break;
2829        }
2830
2831        return 0;
2832}
2833
2834int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2835{
2836        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2837        unsigned long timeout;
2838        u32 val;
2839
2840        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
2841
2842        timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2843
2844        do {
2845                val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2846
2847                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2848                           val);
2849
2850                /* target should never return this */
2851                if (val == 0xffffffff)
2852                        continue;
2853
2854                /* the device has crashed so don't bother trying anymore */
2855                if (val & FW_IND_EVENT_PENDING)
2856                        break;
2857
2858                if (val & FW_IND_INITIALIZED)
2859                        break;
2860
2861                if (ar_pci->num_msi_intrs == 0)
2862                        /* Fix potential race by repeating CORE_BASE writes */
2863                        ath10k_pci_enable_legacy_irq(ar);
2864
2865                mdelay(10);
2866        } while (time_before(jiffies, timeout));
2867
2868        ath10k_pci_disable_and_clear_legacy_irq(ar);
2869        ath10k_pci_irq_msi_fw_mask(ar);
2870
2871        if (val == 0xffffffff) {
2872                ath10k_err(ar, "failed to read device register, device is gone\n");
2873                return -EIO;
2874        }
2875
2876        if (val & FW_IND_EVENT_PENDING) {
2877                ath10k_warn(ar, "device has crashed during init\n");
2878                return -ECOMM;
2879        }
2880
2881        if (!(val & FW_IND_INITIALIZED)) {
2882                ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2883                           val);
2884                return -ETIMEDOUT;
2885        }
2886
2887        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2888        return 0;
2889}
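
/* Note: the mdelay() poll loop above is an open-coded variant of the
 * generic helpers in <linux/iopoll.h>. With a hypothetical one-argument
 * wrapper around ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) it could be
 * written as the sketch below, although the macro cannot express the
 * legacy irq re-kick done in each iteration:
 *
 *      ret = readx_poll_timeout(ath10k_pci_fw_indicator_read, ar, val,
 *                               val & (FW_IND_INITIALIZED |
 *                                      FW_IND_EVENT_PENDING),
 *                               10 * USEC_PER_MSEC,
 *                               ATH10K_PCI_TARGET_WAIT * USEC_PER_MSEC);
 */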
2890
2891static int ath10k_pci_cold_reset(struct ath10k *ar)
2892{
2893        u32 val;
2894
2895        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
2896
2897        spin_lock_bh(&ar->data_lock);
2898
2899        ar->stats.fw_cold_reset_counter++;
2900
2901        spin_unlock_bh(&ar->data_lock);
2902
2903        /* Put Target, including PCIe, into RESET. */
2904        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2905        val |= 1;
2906        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2907
2908        /* After writing into SOC_GLOBAL_RESET to put the device into reset
2909         * (and when pulling it out of reset), PCIe may not be stable enough
2910         * for an immediate PCIe register access; such an access can cause a
2911         * bus error. Add a delay before any PCIe access to avoid this.
2912         */
2913        msleep(20);
2914
2915        /* Pull Target, including PCIe, out of RESET. */
2916        val &= ~1;
2917        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2918
2919        msleep(20);
2920
2921        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
2922
2923        return 0;
2924}
2925
2926static int ath10k_pci_claim(struct ath10k *ar)
2927{
2928        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2929        struct pci_dev *pdev = ar_pci->pdev;
2930        int ret;
2931
2932        pci_set_drvdata(pdev, ar);
2933
2934        ret = pci_enable_device(pdev);
2935        if (ret) {
2936                ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2937                return ret;
2938        }
2939
2940        ret = pci_request_region(pdev, BAR_NUM, "ath");
2941        if (ret) {
2942                ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2943                           ret);
2944                goto err_device;
2945        }
2946
2947        /* Target expects 32 bit DMA. Enforce it. */
2948        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2949        if (ret) {
2950                ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
2951                goto err_region;
2952        }
2953
2954        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2955        if (ret) {
2956                ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2957                           ret);
2958                goto err_region;
2959        }
2960
2961        pci_set_master(pdev);
2962
2963        /* Arrange for access to Target SoC registers. */
2964        ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
2965        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2966        if (!ar_pci->mem) {
2967                ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
2968                ret = -EIO;
2969                goto err_master;
2970        }
2971
2972        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2973        return 0;
2974
2975err_master:
2976        pci_clear_master(pdev);
2977
2978err_region:
2979        pci_release_region(pdev, BAR_NUM);
2980
2981err_device:
2982        pci_disable_device(pdev);
2983
2984        return ret;
2985}
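
/* Note: the pair of DMA mask calls above is equivalent to the combined
 * helper, which newer code tends to prefer:
 *
 *      ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */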
2986
2987static void ath10k_pci_release(struct ath10k *ar)
2988{
2989        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2990        struct pci_dev *pdev = ar_pci->pdev;
2991
2992        pci_iounmap(pdev, ar_pci->mem);
2993        pci_release_region(pdev, BAR_NUM);
2994        pci_clear_master(pdev);
2995        pci_disable_device(pdev);
2996}
2997
2998static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
2999{
3000        const struct ath10k_pci_supp_chip *supp_chip;
3001        int i;
3002        u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3003
3004        for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3005                supp_chip = &ath10k_pci_supp_chips[i];
3006
3007                if (supp_chip->dev_id == dev_id &&
3008                    supp_chip->rev_id == rev_id)
3009                        return true;
3010        }
3011
3012        return false;
3013}
3014
3015int ath10k_pci_setup_resource(struct ath10k *ar)
3016{
3017        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3018        int ret;
3019
3020        spin_lock_init(&ar_pci->ce_lock);
3021        spin_lock_init(&ar_pci->ps_lock);
3022
3023        setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
3024                    (unsigned long)ar);
3025
3026        if (QCA_REV_6174(ar))
3027                ath10k_pci_override_ce_config(ar);
3028
3029        ret = ath10k_pci_alloc_pipes(ar);
3030        if (ret) {
3031                ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3032                           ret);
3033                return ret;
3034        }
3035
3036        return 0;
3037}
3038
3039void ath10k_pci_release_resource(struct ath10k *ar)
3040{
3041        ath10k_pci_kill_tasklet(ar);
3042        ath10k_pci_ce_deinit(ar);
3043        ath10k_pci_free_pipes(ar);
3044}
3045
3046static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3047        .read32         = ath10k_bus_pci_read32,
3048        .write32        = ath10k_bus_pci_write32,
3049        .get_num_banks  = ath10k_pci_get_num_banks,
3050};
3051
static int ath10k_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *pci_dev)
{
        int ret = 0;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        enum ath10k_hw_rev hw_rev;
        u32 chip_id;
        bool pci_ps;

        switch (pci_dev->device) {
        case QCA988X_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA988X;
                pci_ps = false;
                break;
        case QCA6164_2_1_DEVICE_ID:
        case QCA6174_2_1_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA6174;
                pci_ps = true;
                break;
        case QCA99X0_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA99X0;
                pci_ps = false;
                break;
        case QCA9377_1_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA9377;
                pci_ps = true;
                break;
        default:
                WARN_ON(1);
                return -ENOTSUPP;
        }

        ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
                                hw_rev, &ath10k_pci_hif_ops);
        if (!ar) {
                dev_err(&pdev->dev, "failed to allocate core\n");
                return -ENOMEM;
        }

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
                   pdev->vendor, pdev->device,
                   pdev->subsystem_vendor, pdev->subsystem_device);

        ar_pci = ath10k_pci_priv(ar);
        ar_pci->pdev = pdev;
        ar_pci->dev = &pdev->dev;
        ar_pci->ar = ar;
        ar->dev_id = pci_dev->device;
        ar_pci->pci_ps = pci_ps;
        ar_pci->bus_ops = &ath10k_pci_bus_ops;

        ar->id.vendor = pdev->vendor;
        ar->id.device = pdev->device;
        ar->id.subsystem_vendor = pdev->subsystem_vendor;
        ar->id.subsystem_device = pdev->subsystem_device;

        setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
                    (unsigned long)ar);

        ret = ath10k_pci_setup_resource(ar);
        if (ret) {
                ath10k_err(ar, "failed to setup resource: %d\n", ret);
                goto err_core_destroy;
        }

        ret = ath10k_pci_claim(ar);
        if (ret) {
                ath10k_err(ar, "failed to claim device: %d\n", ret);
                goto err_free_pipes;
        }

        ret = ath10k_pci_force_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake up device: %d\n", ret);
                goto err_sleep;
        }

        /* Quiesce any copy engine and interrupt state left over from an
         * earlier boot before setting up our own interrupts.
         */
        ath10k_pci_ce_deinit(ar);
        ath10k_pci_irq_disable(ar);

        ret = ath10k_pci_init_irq(ar);
        if (ret) {
                ath10k_err(ar, "failed to init irqs: %d\n", ret);
                goto err_sleep;
        }

        ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
                    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
                    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

        ret = ath10k_pci_request_irq(ar);
        if (ret) {
                ath10k_warn(ar, "failed to request irqs: %d\n", ret);
                goto err_deinit_irq;
        }

        ret = ath10k_pci_chip_reset(ar);
        if (ret) {
                ath10k_err(ar, "failed to reset chip: %d\n", ret);
                goto err_free_irq;
        }

        chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
        if (chip_id == 0xffffffff) {
                ath10k_err(ar, "failed to get chip id\n");
                ret = -ENODEV;
                goto err_free_irq;
        }

        if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
                ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
                           pdev->device, chip_id);
                ret = -ENODEV;
                goto err_free_irq;
        }

        ret = ath10k_core_register(ar, chip_id);
        if (ret) {
                ath10k_err(ar, "failed to register driver core: %d\n", ret);
                goto err_free_irq;
        }

        return 0;

err_free_irq:
        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);

err_deinit_irq:
        ath10k_pci_deinit_irq(ar);

err_sleep:
        ath10k_pci_sleep_sync(ar);
        ath10k_pci_release(ar);

err_free_pipes:
        ath10k_pci_free_pipes(ar);

err_core_destroy:
        ath10k_core_destroy(ar);

        return ret;
}

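/*
 * Probe order recap: core alloc -> resources/pipes -> PCI claim ->
 * force wake -> IRQ init/request -> chip reset -> chip id check ->
 * core register. The err_* labels above unwind exactly that sequence in
 * reverse, which is why a failed claim jumps straight to err_free_pipes
 * while a failed wake also falls through err_sleep and
 * ath10k_pci_release().
 */
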
static void ath10k_pci_remove(struct pci_dev *pdev)
{
        struct ath10k *ar = pci_get_drvdata(pdev);
        struct ath10k_pci *ar_pci;

        if (!ar)
                return;

        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci)
                return;

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

        /* Unregister from the core first, then unwind the resources
         * acquired in probe.
         */
        ath10k_core_unregister(ar);
        ath10k_pci_free_irq(ar);
        ath10k_pci_deinit_irq(ar);
        ath10k_pci_release_resource(ar);
        ath10k_pci_sleep_sync(ar);
        ath10k_pci_release(ar);
        ath10k_core_destroy(ar);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
        .name = "ath10k_pci",
        .id_table = ath10k_pci_id_table,
        .probe = ath10k_pci_probe,
        .remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
        int ret;

        ret = pci_register_driver(&ath10k_pci_driver);
        if (ret) {
                printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
                       ret);
                return ret;
        }

        ret = ath10k_ahb_init();
        if (ret) {
                printk(KERN_ERR "ahb init failed: %d\n", ret);
                pci_unregister_driver(&ath10k_pci_driver);
        }

        return ret;
}
module_init(ath10k_pci_init);

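/*
 * A PCI-only driver could replace the init/exit pair with the
 * module_pci_driver() helper, e.g.:
 *
 *      module_pci_driver(ath10k_pci_driver);
 *
 * That shortcut does not fit here because module init/exit must also
 * bring the AHB side (ath10k_ahb_init/ath10k_ahb_exit) up and down in
 * the same module.
 */
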
static void __exit ath10k_pci_exit(void)
{
        pci_unregister_driver(&ath10k_pci_driver);
        ath10k_ahb_exit();
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 3.x firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
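
/*
 * MODULE_FIRMWARE() does not load anything by itself; it only records
 * the file names in the module's metadata so userspace tooling (e.g.
 * initramfs generators) can bundle the right blobs. They are visible
 * with:
 *
 *      $ modinfo ath10k_pci | grep '^firmware'
 */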