linux/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START       0x40000
#define IWL_FW_MEM_EXTENDED_END         0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE           352
#define PCI_MEM_DUMP_SIZE       64
#define PCI_PARENT_DUMP_SIZE    524
#define PREFIX_LEN              32
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct pci_dev *pdev = trans_pcie->pci_dev;
        u32 i, pos, alloc_size, *ptr, *buf;
        char *prefix;

        if (trans_pcie->pcie_dbg_dumped_once)
                return;

        /* Should be a multiple of 4 */
        BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
        BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
        BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

        /* Alloc a max size buffer */
        alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
        alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
        alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
        alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

        buf = kmalloc(alloc_size, GFP_ATOMIC);
        if (!buf)
                return;
        prefix = (char *)buf + alloc_size - PREFIX_LEN;

        IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

        /* Print wifi device registers */
        sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
        IWL_ERR(trans, "iwlwifi device config registers:\n");
        for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
                if (pci_read_config_dword(pdev, i, ptr))
                        goto err_read;
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

        IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
        for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
                *ptr = iwl_read32(trans, i);
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        if (pos) {
                IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
                for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
                        if (pci_read_config_dword(pdev, pos + i, ptr))
                                goto err_read;
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
                               32, 4, buf, i, 0);
        }

        /* Print parent device registers next */
        if (!pdev->bus->self)
                goto out;

        pdev = pdev->bus->self;
        sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

        IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
                pci_name(pdev));
        for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
                if (pci_read_config_dword(pdev, i, ptr))
                        goto err_read;
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

        /* Print root port AER registers */
        pos = 0;
        pdev = pcie_find_root_port(pdev);
        if (pdev)
                pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        if (pos) {
                IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
                        pci_name(pdev));
                sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
                for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
                        if (pci_read_config_dword(pdev, pos + i, ptr))
                                goto err_read;
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
                               4, buf, i, 0);
        }
        goto out;

err_read:
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
        IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
        trans_pcie->pcie_dbg_dumped_once = 1;
        kfree(buf);
}
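
/*
 * Layout sketch (illustrative, not part of the driver): the dump above
 * packs the printk prefix into the tail of the single kmalloc() buffer,
 * so one GFP_ATOMIC allocation serves both the register words and the
 * per-device prefix string:
 *
 *      buf                               buf + alloc_size
 *      |<-------- dump dwords -------->|<---- PREFIX_LEN ---->|
 *                                       prefix = (char *)buf +
 *                                                alloc_size - PREFIX_LEN
 *
 * alloc_size is the maximum of all the dump sizes plus PREFIX_LEN, so
 * every print_hex_dump() call above fits without a second allocation
 * in this error path.
 */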

static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
        /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
        usleep_range(5000, 6000);
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
        struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

        if (!fw_mon->size)
                return;

        dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
                          fw_mon->physical);

        fw_mon->block = NULL;
        fw_mon->physical = 0;
        fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
                                            u8 max_power, u8 min_power)
{
        struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
        void *block = NULL;
        dma_addr_t physical = 0;
        u32 size = 0;
        u8 power;

        if (fw_mon->size)
                return;

        for (power = max_power; power >= min_power; power--) {
                size = BIT(power);
                block = dma_alloc_coherent(trans->dev, size, &physical,
                                           GFP_KERNEL | __GFP_NOWARN);
                if (!block)
                        continue;

                IWL_INFO(trans,
                         "Allocated 0x%08x bytes for firmware monitor.\n",
                         size);
                break;
        }

        if (WARN_ON_ONCE(!block))
                return;

        if (power != max_power)
                IWL_ERR(trans,
                        "Sorry - debug buffer is only %luK while you requested %luK\n",
                        (unsigned long)BIT(power - 10),
                        (unsigned long)BIT(max_power - 10));

        fw_mon->block = block;
        fw_mon->physical = physical;
        fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
        if (!max_power) {
                /* default max_power is maximum */
                max_power = 26;
        } else {
                max_power += 11;
        }

        if (WARN(max_power > 26,
                 "External buffer size for monitor is too big %d, check the FW TLV\n",
                 max_power))
                return;

        if (trans->dbg.fw_mon.size)
                return;

        iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}
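
/*
 * Worked example (hypothetical TLV value): a firmware TLV size-power of
 * 13 gives max_power = 13 + 11 = 24, so iwl_pcie_alloc_fw_monitor_block()
 * first attempts a BIT(24) = 16 MiB coherent buffer and halves the
 * request on failure, down to the BIT(11) = 2 KiB floor:
 *
 *      iwl_pcie_alloc_fw_monitor(trans, 13);  // try 16M, 8M, ..., 2K
 *      iwl_pcie_alloc_fw_monitor(trans, 0);   // default: start at 64M
 *
 * When the loop settles below max_power, the "Sorry - debug buffer is
 * only ..." message reports the downgrade.
 */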

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
                    ((reg & 0x0000ffff) | (2 << 28)));
        return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
                    ((reg & 0x0000ffff) | (3 << 28)));
}
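
/*
 * Encoding sketch (inferred from the two helpers above, not from
 * documentation): the shared-memory control word carries the target
 * register in its low 16 bits and an opcode in the top nibble, 2 for
 * read and 3 for write. A read of hypothetical register 0x1234 thus
 * amounts to:
 *
 *      iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
 *                  (0x1234 & 0x0000ffff) | (2 << 28));
 *      val = iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
 *
 * while a write latches the data register first, then issues opcode 3.
 */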

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
        if (trans->cfg->apmg_not_supported)
                return;

        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
        else
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT   0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;
        u16 cap;

        /*
         * L0S states have been found to be unstable with our devices
         * and in newer hardware they are not officially supported at
         * all, so we must always set the L0S_DISABLED bit.
         */
        iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
        trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
        IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
                        (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
                        trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
        int ret;

        IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

        /*
         * Use "set_bit" below rather than "write", to preserve any hardware
         * bits already set by default after reset.
         */

        /* Disable L0S exit timer (platform NMI Work/Around) */
        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
                iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                            CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

        /*
         * Disable L0s without affecting L1;
         *  don't wait for ICH L0s (ICH bug W/A)
         */
        iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

        /* Set FH wait threshold to maximum (HW error during stress W/A) */
        iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

        /*
         * Enable HAP INTA (interrupt from management bus) to
         * wake device's PCI Express link L1a -> L0s
         */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

        iwl_pcie_apm_config(trans);

        /* Configure analog phase-lock-loop before activating to D0A */
        if (trans->trans_cfg->base_params->pll_cfg)
                iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
        if (ret)
                return ret;

        if (trans->cfg->host_interrupt_operation_mode) {
                /*
                 * This is a bit of an abuse: the workaround below is only
                 * needed for 7260 / 3160, so we key it off
                 * host_interrupt_operation_mode even though it is not
                 * actually related to that mode.
                 *
                 * Enable the oscillator to count wake up time for L1 exit. This
                 * consumes slightly more power (100uA) - but allows us to be
                 * sure that we wake up from L1 on time.
                 *
                 * This looks weird: read the same register twice, discard the
                 * value, set a bit, and yet again, read that same register
                 * just to discard the value. But that's the way the hardware
                 * seems to like it.
                 */
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
                iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
        }

        /*
         * Enable DMA clock and wait for it to stabilize.
         *
         * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
         * bits do not disable clocks.  This preserves any hardware
         * bits already set by default in "CLK_CTRL_REG" after reset.
         */
        if (!trans->cfg->apmg_not_supported) {
                iwl_write_prph(trans, APMG_CLK_EN_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(20);

                /* Disable L1-Active */
                iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

                /* Clear the interrupt in APMG if the NIC is in RFKILL */
                iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
                               APMG_RTC_INT_STT_RFKILL);
        }

        set_bit(STATUS_DEVICE_ENABLED, &trans->status);

        return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
        int ret;
        u32 apmg_gp1_reg;
        u32 apmg_xtal_cfg_reg;
        u32 dl_cfg_reg;

        /* Force XTAL ON */
        __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

        iwl_trans_pcie_sw_reset(trans);

        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
        if (WARN_ON(ret)) {
                /* Release XTAL ON request */
                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                           CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
                return;
        }

        /*
         * Clear "disable persistence" to avoid LP XTAL resetting when
         * SHRD_HW_RST is applied in S3.
         */
        iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

        /*
         * Force APMG XTAL to be active to prevent its disabling by HW
         * caused by APMG idle state.
         */
        apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
                                                    SHR_APMG_XTAL_CFG_REG);
        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
                                 apmg_xtal_cfg_reg |
                                 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

        iwl_trans_pcie_sw_reset(trans);

        /* Enable LP XTAL by indirect access through CSR */
        apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
        iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
                                 SHR_APMG_GP1_WF_XTAL_LP_EN |
                                 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

        /* Clear delay line clock power up */
        dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
        iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
                                 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

        /*
         * Enable persistence mode to avoid LP XTAL resetting when
         * SHRD_HW_RST is applied in S3.
         */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

        /*
         * Clear "initialization complete" bit to move adapter from
         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
         */
        iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

        /* Activates XTAL resources monitor */
        __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
                                 CSR_MONITOR_XTAL_RESOURCES);

        /* Release XTAL ON request */
        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
        udelay(10);

        /* Release APMG XTAL */
        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
                                 apmg_xtal_cfg_reg &
                                 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
        int ret;

        /* stop device's busmaster DMA activity */

        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
                iwl_set_bit(trans, CSR_GP_CNTRL,
                            CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

                ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
                                   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
                                   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
                                   100);
        } else {
                iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

                ret = iwl_poll_bit(trans, CSR_RESET,
                                   CSR_RESET_REG_FLAG_MASTER_DISABLED,
                                   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
        }

        if (ret < 0)
                IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

        IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
        IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

        if (op_mode_leave) {
                if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                        iwl_pcie_apm_init(trans);

                /* inform ME that we are leaving */
                if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
                        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                          APMG_PCIDEV_STT_VAL_WAKE_ME);
                else if (trans->trans_cfg->device_family >=
                         IWL_DEVICE_FAMILY_8000) {
                        iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                                    CSR_RESET_LINK_PWR_MGMT_DISABLED);
                        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                                    CSR_HW_IF_CONFIG_REG_PREPARE |
                                    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
                        mdelay(1);
                        iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                                      CSR_RESET_LINK_PWR_MGMT_DISABLED);
                }
                mdelay(5);
        }

        clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

        /* Stop device's DMA activity */
        iwl_pcie_apm_stop_master(trans);

        if (trans->cfg->lp_xtal_workaround) {
                iwl_pcie_apm_lp_xtal_enable(trans);
                return;
        }

        iwl_trans_pcie_sw_reset(trans);

        /*
         * Clear "initialization complete" bit to move adapter from
         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
         */
        iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret;

        /* nic_init */
        spin_lock_bh(&trans_pcie->irq_lock);
        ret = iwl_pcie_apm_init(trans);
        spin_unlock_bh(&trans_pcie->irq_lock);

        if (ret)
                return ret;

        iwl_pcie_set_pwr(trans, false);

        iwl_op_mode_nic_config(trans->op_mode);

        /* Allocate the RX queue, or reset if it is already allocated */
        ret = iwl_pcie_rx_init(trans);
        if (ret)
                return ret;

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_pcie_tx_init(trans)) {
                iwl_pcie_rx_free(trans);
                return -ENOMEM;
        }

        if (trans->trans_cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
                IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
        }

        return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns the poll_bit return value, which is >= 0 on success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
        int ret;

        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        /* See if we got it */
        ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           HW_READY_TIMEOUT);

        if (ret >= 0)
                iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

        IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
        return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
        int ret;
        int t = 0;
        int iter;

        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

        ret = iwl_pcie_set_hw_ready(trans);
        /* If the card is ready, exit 0 */
        if (ret >= 0)
                return 0;

        iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                    CSR_RESET_LINK_PWR_MGMT_DISABLED);
        usleep_range(1000, 2000);

        for (iter = 0; iter < 10; iter++) {
                /* If HW is not ready, prepare the conditions to check again */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_PREPARE);

                do {
                        ret = iwl_pcie_set_hw_ready(trans);
                        if (ret >= 0)
                                return 0;

                        usleep_range(200, 1000);
                        t += 200;
                } while (t < 150000);
                msleep(25);
        }

        IWL_ERR(trans, "Couldn't prepare the card\n");

        return ret;
}
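
/*
 * Timing sketch (derived from the loop bounds above, not a documented
 * guarantee): the inner do/while polls iwl_pcie_set_hw_ready() in
 * ~200 us steps until 't' reaches 150000, and since 't' is never reset,
 * only the first outer iteration polls for the full ~150 ms; the nine
 * later iterations each poll once after re-asserting PREPARE and
 * sleeping 25 ms. The worst case is therefore roughly
 *
 *      150 ms + 10 * 25 ms + 9 short polls, on the order of 400 ms,
 *
 * before "Couldn't prepare the card" is reported.
 */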

/*
 * ucode
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
                                            u32 dst_addr, dma_addr_t phy_addr,
                                            u32 byte_cnt)
{
        iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
                    dst_addr);

        iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
                    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
                    (iwl_get_dma_hi_addr(phy_addr)
                        << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
                    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
                    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
                    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
                    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
                                        u32 dst_addr, dma_addr_t phy_addr,
                                        u32 byte_cnt)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret;

        trans_pcie->ucode_write_complete = false;

        if (!iwl_trans_grab_nic_access(trans))
                return -EIO;

        iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
                                        byte_cnt);
        iwl_trans_release_nic_access(trans);

        ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
                                 trans_pcie->ucode_write_complete, 5 * HZ);
        if (!ret) {
                IWL_ERR(trans, "Failed to load firmware chunk!\n");
                iwl_trans_pcie_dump_regs(trans);
                return -ETIMEDOUT;
        }

        return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
                            const struct fw_desc *section)
{
        u8 *v_addr;
        dma_addr_t p_addr;
        u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
        int ret = 0;

        IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
                     section_num);

        v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
                                    GFP_KERNEL | __GFP_NOWARN);
        if (!v_addr) {
                IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
                chunk_sz = PAGE_SIZE;
                v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
                                            &p_addr, GFP_KERNEL);
                if (!v_addr)
                        return -ENOMEM;
        }

        for (offset = 0; offset < section->len; offset += chunk_sz) {
                u32 copy_size, dst_addr;
                bool extended_addr = false;

                copy_size = min_t(u32, chunk_sz, section->len - offset);
                dst_addr = section->offset + offset;

                if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
                    dst_addr <= IWL_FW_MEM_EXTENDED_END)
                        extended_addr = true;

                if (extended_addr)
                        iwl_set_bits_prph(trans, LMPM_CHICK,
                                          LMPM_CHICK_EXTENDED_ADDR_SPACE);

                memcpy(v_addr, (u8 *)section->data + offset, copy_size);
                ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
                                                   copy_size);

                if (extended_addr)
                        iwl_clear_bits_prph(trans, LMPM_CHICK,
                                            LMPM_CHICK_EXTENDED_ADDR_SPACE);

                if (ret) {
                        IWL_ERR(trans,
                                "Could not load the [%d] uCode section\n",
                                section_num);
                        break;
                }
        }

        dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
        return ret;
}
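
/*
 * Usage sketch (hypothetical values): loading a 70 KiB section whose
 * destination lies below IWL_FW_MEM_EXTENDED_START reuses one DMA
 * bounce buffer of up to FH_MEM_TB_MAX_LENGTH bytes per chunk:
 *
 *      struct fw_desc sec = {
 *              .data = blob,           // assumed firmware payload
 *              .len = 70 * 1024,
 *              .offset = 0x20000,      // SRAM destination
 *      };
 *      ret = iwl_pcie_load_section(trans, 0, &sec);
 *
 * Only chunks landing in the 0x40000-0x57FFF window toggle the
 * LMPM_CHICK_EXTENDED_ADDR_SPACE bit around the transfer.
 */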

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
                                           const struct fw_img *image,
                                           int cpu,
                                           int *first_ucode_section)
{
        int shift_param;
        int i, ret = 0, sec_num = 0x1;
        u32 val, last_read_idx = 0;

        if (cpu == 1) {
                shift_param = 0;
                *first_ucode_section = 0;
        } else {
                shift_param = 16;
                (*first_ucode_section)++;
        }

        for (i = *first_ucode_section; i < image->num_sec; i++) {
                last_read_idx = i;

                /*
                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1
                 * sections from CPU2 sections.
                 * PAGING_SEPARATOR_SECTION delimiter - separates CPU2
                 * non-paged sections from CPU2 paging sections.
                 */
                if (!image->sec[i].data ||
                    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
                    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
                        break;
                }

                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;

                /* Notify ucode of loaded section number and status */
                val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
                val = val | (sec_num << shift_param);
                iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

                sec_num = (sec_num << 1) | 0x1;
        }

        *first_ucode_section = last_read_idx;

        iwl_enable_interrupts(trans);

        if (trans->trans_cfg->use_tfh) {
                if (cpu == 1)
                        iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
                                       0xFFFF);
                else
                        iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
                                       0xFFFFFFFF);
        } else {
                if (cpu == 1)
                        iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
                                           0xFFFF);
                else
                        iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
                                           0xFFFFFFFF);
        }

        return 0;
}
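
/*
 * Illustration of the sec_num bookkeeping above: sec_num starts at 0x1
 * and grows via sec_num = (sec_num << 1) | 0x1, so the value OR-ed into
 * FH_UCODE_LOAD_STATUS accumulates one bit per loaded section (shifted
 * left by 16 for CPU2):
 *
 *      after section 0: 0x0001
 *      after section 1: 0x0003
 *      after section 2: 0x0007
 *
 * Writing 0xFFFF (CPU1) or 0xFFFFFFFF (both CPUs) afterwards signals
 * that loading is complete.
 */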

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
                                      const struct fw_img *image,
                                      int cpu,
                                      int *first_ucode_section)
{
        int i, ret = 0;
        u32 last_read_idx = 0;

        if (cpu == 1)
                *first_ucode_section = 0;
        else
                (*first_ucode_section)++;

        for (i = *first_ucode_section; i < image->num_sec; i++) {
                last_read_idx = i;

                /*
                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1
                 * sections from CPU2 sections.
                 * PAGING_SEPARATOR_SECTION delimiter - separates CPU2
                 * non-paged sections from CPU2 paging sections.
                 */
                if (!image->sec[i].data ||
                    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
                    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
                        break;
                }

                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;
        }

        *first_ucode_section = last_read_idx;

        return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
        enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
        struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
                &trans->dbg.fw_mon_cfg[alloc_id];
        struct iwl_dram_data *frag;

        if (!iwl_trans_dbg_ini_valid(trans))
                return;

        if (le32_to_cpu(fw_mon_cfg->buf_location) ==
            IWL_FW_INI_LOCATION_SRAM_PATH) {
                IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
                /* set sram monitor by enabling bit 7 */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

                return;
        }

        if (le32_to_cpu(fw_mon_cfg->buf_location) !=
            IWL_FW_INI_LOCATION_DRAM_PATH ||
            !trans->dbg.fw_mon_ini[alloc_id].num_frags)
                return;

        frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

        IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
                     alloc_id);

        iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
                            frag->physical >> MON_BUFF_SHIFT_VER2);
        iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
                            (frag->physical + frag->size - 256) >>
                            MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
        const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
        const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
        int i;

        if (iwl_trans_dbg_ini_valid(trans)) {
                iwl_pcie_apply_destination_ini(trans);
                return;
        }

        IWL_INFO(trans, "Applying debug destination %s\n",
                 get_fw_dbg_mode_string(dest->monitor_mode));

        if (dest->monitor_mode == EXTERNAL_MODE)
                iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
        else
                IWL_WARN(trans, "PCI should have external buffer debug\n");

        for (i = 0; i < trans->dbg.n_dest_reg; i++) {
                u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
                u32 val = le32_to_cpu(dest->reg_ops[i].val);

                switch (dest->reg_ops[i].op) {
                case CSR_ASSIGN:
                        iwl_write32(trans, addr, val);
                        break;
                case CSR_SETBIT:
                        iwl_set_bit(trans, addr, BIT(val));
                        break;
                case CSR_CLEARBIT:
                        iwl_clear_bit(trans, addr, BIT(val));
                        break;
                case PRPH_ASSIGN:
                        iwl_write_prph(trans, addr, val);
                        break;
                case PRPH_SETBIT:
                        iwl_set_bits_prph(trans, addr, BIT(val));
                        break;
                case PRPH_CLEARBIT:
                        iwl_clear_bits_prph(trans, addr, BIT(val));
                        break;
                case PRPH_BLOCKBIT:
                        if (iwl_read_prph(trans, addr) & BIT(val)) {
                                IWL_ERR(trans,
                                        "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
                                        val, addr);
                                goto monitor;
                        }
                        break;
                default:
                        IWL_ERR(trans, "FW debug - unknown OP %d\n",
                                dest->reg_ops[i].op);
                        break;
                }
        }

monitor:
        if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
                iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
                               fw_mon->physical >> dest->base_shift);
                if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
                        iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
                                       (fw_mon->physical + fw_mon->size -
                                        256) >> dest->end_shift);
                else
                        iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
                                       (fw_mon->physical + fw_mon->size) >>
                                       dest->end_shift);
        }
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                const struct fw_img *image)
{
        int ret = 0;
        int first_ucode_section;

        IWL_DEBUG_FW(trans, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");

        /* load to FW the binary non secured sections of CPU1 */
        ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
        if (ret)
                return ret;

        if (image->is_dual_cpus) {
                /* set CPU2 header address */
                iwl_write_prph(trans,
                               LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
                               LMPM_SECURE_CPU2_HDR_MEM_SPACE);

                /* load to FW the binary sections of CPU2 */
                ret = iwl_pcie_load_cpu_sections(trans, image, 2,
                                                 &first_ucode_section);
                if (ret)
                        return ret;
        }

        if (iwl_pcie_dbg_on(trans))
                iwl_pcie_apply_destination(trans);

        iwl_enable_interrupts(trans);

        /* release CPU reset */
        iwl_write32(trans, CSR_RESET, 0);

        return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
                                          const struct fw_img *image)
{
        int ret = 0;
        int first_ucode_section;

        IWL_DEBUG_FW(trans, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");

        if (iwl_pcie_dbg_on(trans))
                iwl_pcie_apply_destination(trans);

        IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
                        iwl_read_prph(trans, WFPM_GP2));
        /*
         * Set a default value. On resume, reading back the values that were
         * zeroed can provide debug data on the resume flow.
         * This is for debugging only and has no functional impact.
         */
        iwl_write_prph(trans, WFPM_GP2, 0x01010101);

        /* configure the ucode to be ready to get the secured image */
        /* release CPU reset */
        iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

        /* load to FW the binary Secured sections of CPU1 */
        ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
                                              &first_ucode_section);
        if (ret)
                return ret;

        /* load to FW the binary sections of CPU2 */
        return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
                                               &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill = iwl_is_rfkill_set(trans);
        bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
        bool report;

        if (hw_rfkill) {
                set_bit(STATUS_RFKILL_HW, &trans->status);
                set_bit(STATUS_RFKILL_OPMODE, &trans->status);
        } else {
                clear_bit(STATUS_RFKILL_HW, &trans->status);
                if (trans_pcie->opmode_down)
                        clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
        }

        report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

        if (prev != report)
                iwl_trans_pcie_rf_kill(trans, report);

        return hw_rfkill;
}

struct iwl_causes_list {
        u32 cause_num;
        u32 mask_reg;
        u8 addr;
};

static struct iwl_causes_list causes_list[] = {
        {MSIX_FH_INT_CAUSES_D2S_CH0_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0},
        {MSIX_FH_INT_CAUSES_D2S_CH1_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0x1},
        {MSIX_FH_INT_CAUSES_S2D,                CSR_MSIX_FH_INT_MASK_AD, 0x3},
        {MSIX_FH_INT_CAUSES_FH_ERR,             CSR_MSIX_FH_INT_MASK_AD, 0x5},
        {MSIX_HW_INT_CAUSES_REG_ALIVE,          CSR_MSIX_HW_INT_MASK_AD, 0x10},
        {MSIX_HW_INT_CAUSES_REG_WAKEUP,         CSR_MSIX_HW_INT_MASK_AD, 0x11},
        {MSIX_HW_INT_CAUSES_REG_RESET_DONE,     CSR_MSIX_HW_INT_MASK_AD, 0x12},
        {MSIX_HW_INT_CAUSES_REG_CT_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x16},
        {MSIX_HW_INT_CAUSES_REG_RF_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x17},
        {MSIX_HW_INT_CAUSES_REG_PERIODIC,       CSR_MSIX_HW_INT_MASK_AD, 0x18},
        {MSIX_HW_INT_CAUSES_REG_SW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x29},
        {MSIX_HW_INT_CAUSES_REG_SCD,            CSR_MSIX_HW_INT_MASK_AD, 0x2A},
        {MSIX_HW_INT_CAUSES_REG_FH_TX,          CSR_MSIX_HW_INT_MASK_AD, 0x2B},
        {MSIX_HW_INT_CAUSES_REG_HW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x2D},
        {MSIX_HW_INT_CAUSES_REG_HAP,            CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
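
/*
 * IVAR encoding example (restating the layout used below): each cause
 * occupies one byte in the IVAR table, the low nibble naming the bound
 * MSI-X vector, with MSIX_NON_AUTO_CLEAR_CAUSE marking causes whose
 * vector is shared. Binding e.g. the ALIVE cause (addr 0x10) to vector
 * 0 in non-auto-clear mode would be:
 *
 *      iwl_write8(trans, CSR_MSIX_IVAR(0x10),
 *                 0 | MSIX_NON_AUTO_CLEAR_CAUSE);
 */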

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
        int i, arr_size = ARRAY_SIZE(causes_list);
        struct iwl_causes_list *causes = causes_list;

        /*
         * Access all non RX causes and map them to the default irq.
         * In case we are missing at least one interrupt vector,
         * the first interrupt vector will serve non-RX and FBQ causes.
         */
        for (i = 0; i < arr_size; i++) {
                iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
                iwl_clear_bit(trans, causes[i].mask_reg,
                              causes[i].cause_num);
        }
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 offset =
                trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
        u32 val, idx;

        /*
         * The first RX queue - the fallback queue, which handles
         * management frames, command responses, etc. - is always mapped
         * to the first interrupt vector. The other RX queues are mapped
         * to the remaining (N - 2) interrupt vectors.
         */
        val = BIT(MSIX_FH_INT_CAUSES_Q(0));
        for (idx = 1; idx < trans->num_rx_queues; idx++) {
                iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
                           MSIX_FH_INT_CAUSES_Q(idx - offset));
                val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
        }
        iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

        val = MSIX_FH_INT_CAUSES_Q(0);
        if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
                val |= MSIX_NON_AUTO_CLEAR_CAUSE;
        iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

        if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
                iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
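
/*
 * Mapping example (assuming 4 RX queues and no shared vector): the
 * fallback queue stays on cause 0 and the others follow in order,
 *
 *      RX_IVAR(0) = MSIX_FH_INT_CAUSES_Q(0)
 *      RX_IVAR(1) = MSIX_FH_INT_CAUSES_Q(1)
 *      RX_IVAR(2) = MSIX_FH_INT_CAUSES_Q(2)
 *      RX_IVAR(3) = MSIX_FH_INT_CAUSES_Q(3)
 *
 * With IWL_SHARED_IRQ_FIRST_RSS set, offset = 1 shifts queues down one
 * cause and RX_IVAR(1) is rewritten to cause 0, so the first RSS queue
 * shares the fallback queue's vector.
 */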

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
        struct iwl_trans *trans = trans_pcie->trans;

        if (!trans_pcie->msix_enabled) {
                if (trans->trans_cfg->mq_rx_supported &&
                    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                        iwl_write_umac_prph(trans, UREG_CHICK,
                                            UREG_CHICK_MSI_ENABLE);
                return;
        }
        /*
         * The IVAR table needs to be configured again after reset,
         * but if the device is disabled, we can't write to
         * prph.
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

        /*
         * Each cause from the causes list above and the RX causes is
         * represented as a byte in the IVAR table. The first nibble
         * represents the bound interrupt vector of the cause, the second
         * represents no auto clear for this cause. This will be set if its
         * interrupt vector is bound to serve other causes.
         */
        iwl_pcie_map_rx_causes(trans);

        iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
        struct iwl_trans *trans = trans_pcie->trans;

        iwl_pcie_conf_msix_hw(trans_pcie);

        if (!trans_pcie->msix_enabled)
                return;

        trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
        trans_pcie->fh_mask = trans_pcie->fh_init_mask;
        trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
        trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        lockdep_assert_held(&trans_pcie->mutex);

        if (trans_pcie->is_down)
                return;

        trans_pcie->is_down = true;

        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);

        /* device going down, stop using ICT table */
        iwl_pcie_disable_ict(trans);

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
                IWL_DEBUG_INFO(trans,
                               "DEVICE_ENABLED bit was set and is now cleared\n");
                iwl_pcie_tx_stop(trans);
                iwl_pcie_rx_stop(trans);

                /* Power-down device's busmaster DMA clocks */
                if (!trans->cfg->apmg_not_supported) {
                        iwl_write_prph(trans, APMG_CLK_DIS_REG,
                                       APMG_CLK_VAL_DMA_CLK_RQT);
                        udelay(5);
                }
        }

        /* Make sure (redundant) we've released our request to stay awake */
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwl_pcie_apm_stop(trans, false);

        iwl_trans_pcie_sw_reset(trans);

        /*
         * Upon stop, the IVAR table gets erased, so msi-x won't
         * work. This causes a bug in RF-KILL flows, since the interrupt
         * that enables radio won't fire on the correct irq, and the
         * driver won't be able to handle the interrupt.
         * Configure the IVAR table again after reset.
         */
        iwl_pcie_conf_msix_hw(trans_pcie);

        /*
         * Upon stop, the APM issues an interrupt if HW RF kill is set.
         * This is a bug in certain versions of the hardware.
         * Certain devices also keep sending the HW RF kill interrupt all
         * the time unless it is ACKed, even when the interrupt should be
         * masked. Re-ACK all the interrupts here.
         */
        iwl_disable_interrupts(trans);

        /* clear all status bits */
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        clear_bit(STATUS_INT_ENABLED, &trans->status);
        clear_bit(STATUS_TPOWER_PMI, &trans->status);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwl_enable_rfkill_int(trans);

        /* re-take ownership to prevent other users from stealing the device */
        iwl_pcie_prepare_card_hw(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->msix_enabled) {
                int i;

                for (i = 0; i < trans_pcie->alloc_vecs; i++)
                        synchronize_irq(trans_pcie->msix_entries[i].vector);
        } else {
                synchronize_irq(trans_pcie->pci_dev->irq);
        }
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw, bool run_in_rfkill)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
        int ret;

        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
                ret = -EIO;
                goto out;
        }

        iwl_enable_rfkill_int(trans);

        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

        /*
         * We enabled the RF-Kill interrupt and the handler may very
         * well be running. Disable the interrupts to make sure no other
         * interrupt can be fired.
         */
        iwl_disable_interrupts(trans);

        /* Make sure it finished running */
        iwl_pcie_synchronize_irqs(trans);

        mutex_lock(&trans_pcie->mutex);

        /* If platform's RF_KILL switch is NOT set to KILL */
        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill) {
                ret = -ERFKILL;
                goto out;
        }

        /* Someone called stop_device, don't try to start_fw */
        if (trans_pcie->is_down) {
                IWL_WARN(trans,
                         "Can't start_fw since the HW hasn't been started\n");
                ret = -EIO;
                goto out;
        }

        /* make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

        ret = iwl_pcie_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
                goto out;
        }

        /*
         * Now, we load the firmware and don't want to be interrupted, even
         * by the RF-Kill interrupt (hence mask all the interrupts besides
         * the FH_TX interrupt, which is needed to load the firmware). If
         * the RF-Kill switch is toggled, we will find out after having
         * loaded the firmware and return the proper value to the caller.
         */
        iwl_enable_fw_load_int(trans);

        /* really make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

        /* Load the given image to the HW */
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
                ret = iwl_pcie_load_given_ucode_8000(trans, fw);
        else
                ret = iwl_pcie_load_given_ucode(trans, fw);

        /* re-check RF-Kill state since we may have missed the interrupt */
        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill)
                ret = -ERFKILL;

out:
        mutex_unlock(&trans_pcie->mutex);
        return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
        iwl_pcie_reset_ict(trans);
        iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
                                       bool was_in_rfkill)
{
        bool hw_rfkill;

        /*
         * Check again since the RF kill state may have changed while
         * all the interrupts were disabled, in which case we couldn't
         * receive the RF kill interrupt and update the state in the
         * op_mode.
         * Don't call the op_mode if the rfkill state hasn't changed.
         * This allows the op_mode to call stop_device from the rfkill
         * notification without endless recursion. Under very rare
         * circumstances, we might have a small recursion if the rfkill
         * state changed exactly now while we were called from stop_device.
         * This is very unlikely but can happen and is supported.
         */
1377        hw_rfkill = iwl_is_rfkill_set(trans);
1378        if (hw_rfkill) {
1379                set_bit(STATUS_RFKILL_HW, &trans->status);
1380                set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1381        } else {
1382                clear_bit(STATUS_RFKILL_HW, &trans->status);
1383                clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1384        }
1385        if (hw_rfkill != was_in_rfkill)
1386                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1387}
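
/*
 * The scheme above is deliberately edge-triggered: the op_mode is only
 * notified when the rfkill state really changed, which is what breaks
 * the potential recursion with stop_device.  A minimal sketch of the
 * same idea (illustrative only; the helper name is hypothetical):
 */
static bool __maybe_unused iwl_example_rfkill_edge(bool *cached_state,
                                                   bool current_state)
{
        bool changed = *cached_state != current_state;

        *cached_state = current_state;
        return changed; /* notify the op_mode only when true */
}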
1388
1389static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1390{
1391        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1392        bool was_in_rfkill;
1393
1394        iwl_op_mode_time_point(trans->op_mode,
1395                               IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
1396                               NULL);
1397
1398        mutex_lock(&trans_pcie->mutex);
1399        trans_pcie->opmode_down = true;
1400        was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1401        _iwl_trans_pcie_stop_device(trans);
1402        iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
1403        mutex_unlock(&trans_pcie->mutex);
1404}
1405
1406void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
1407{
1408        struct iwl_trans_pcie __maybe_unused *trans_pcie =
1409                IWL_TRANS_GET_PCIE_TRANS(trans);
1410
1411        lockdep_assert_held(&trans_pcie->mutex);
1412
1413        IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
1414                 state ? "disabled" : "enabled");
1415        if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
1416                if (trans->trans_cfg->gen2)
1417                        _iwl_trans_pcie_gen2_stop_device(trans);
1418                else
1419                        _iwl_trans_pcie_stop_device(trans);
1420        }
1421}
1422
1423void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
1424                                  bool test, bool reset)
1425{
1426        iwl_disable_interrupts(trans);
1427
1428        /*
1429         * in testing mode, the host stays awake and the
1430         * hardware won't be reset (not even partially)
1431         */
1432        if (test)
1433                return;
1434
1435        iwl_pcie_disable_ict(trans);
1436
1437        iwl_pcie_synchronize_irqs(trans);
1438
1439        iwl_clear_bit(trans, CSR_GP_CNTRL,
1440                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1441        iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1442
1443        if (reset) {
1444                /*
1445                 * reset TX queues -- some of their registers reset during S3
1446                 * so if we don't reset everything here the D3 image would try
1447                 * to execute some invalid memory upon resume
1448                 */
1449                iwl_trans_pcie_tx_reset(trans);
1450        }
1451
1452        iwl_pcie_set_pwr(trans, true);
1453}
1454
1455static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
1456                                     bool reset)
1457{
1458        int ret;
1459        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1460
1461        if (!reset)
1462                /* Enable persistence mode to avoid reset */
1463                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
1464                            CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
1465
1466        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1467                iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
1468                                    UREG_DOORBELL_TO_ISR6_SUSPEND);
1469
1470                ret = wait_event_timeout(trans_pcie->sx_waitq,
1471                                         trans_pcie->sx_complete, 2 * HZ);
1472                /*
1473                 * Invalidate the flag, so that it must be set afresh on resume.
1474                 */
1475                trans_pcie->sx_complete = false;
1476
1477                if (!ret) {
1478                        IWL_ERR(trans, "Timeout entering D3\n");
1479                        return -ETIMEDOUT;
1480                }
1481        }
1482        iwl_pcie_d3_complete_suspend(trans, test, reset);
1483
1484        return 0;
1485}
1486
1487static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1488                                    enum iwl_d3_status *status,
1489                                    bool test, bool reset)
1490{
1491        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1492        u32 val;
1493        int ret;
1494
1495        if (test) {
1496                iwl_enable_interrupts(trans);
1497                *status = IWL_D3_STATUS_ALIVE;
1498                goto out;
1499        }
1500
1501        iwl_set_bit(trans, CSR_GP_CNTRL,
1502                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1503
1504        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
1505        if (ret)
1506                return ret;
1507
1508        /*
1509         * Reconfigure the IVAR table in case of MSI-X, or reset the ICT
1510         * table in MSI mode, since the HW reset erased it.
1511         * This also enables interrupts - none will arrive yet, though,
1512         * since the device doesn't know we're waking it up; they only
1513         * start once the opmode tells it to, after this call.
1514         */
1515        iwl_pcie_conf_msix_hw(trans_pcie);
1516        if (!trans_pcie->msix_enabled)
1517                iwl_pcie_reset_ict(trans);
1518        iwl_enable_interrupts(trans);
1519
1520        iwl_pcie_set_pwr(trans, false);
1521
1522        if (!reset) {
1523                iwl_clear_bit(trans, CSR_GP_CNTRL,
1524                              CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1525        } else {
1526                iwl_trans_pcie_tx_reset(trans);
1527
1528                ret = iwl_pcie_rx_init(trans);
1529                if (ret) {
1530                        IWL_ERR(trans,
1531                                "Failed to resume the device (RX reset)\n");
1532                        return ret;
1533                }
1534        }
1535
1536        IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
1537                        iwl_read_umac_prph(trans, WFPM_GP2));
1538
1539        val = iwl_read32(trans, CSR_RESET);
1540        if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
1541                *status = IWL_D3_STATUS_RESET;
1542        else
1543                *status = IWL_D3_STATUS_ALIVE;
1544
1545out:
1546        if (*status == IWL_D3_STATUS_ALIVE &&
1547            trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1548                trans_pcie->sx_complete = false;
1549                iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
1550                                    UREG_DOORBELL_TO_ISR6_RESUME);
1551
1552                ret = wait_event_timeout(trans_pcie->sx_waitq,
1553                                         trans_pcie->sx_complete, 2 * HZ);
1554                /*
1555                 * Invalidate the flag, so that the next suspend must set it afresh.
1556                 */
1557                trans_pcie->sx_complete = false;
1558
1559                if (!ret) {
1560                        IWL_ERR(trans, "Timeout exiting D3\n");
1561                        return -ETIMEDOUT;
1562                }
1563        }
1564        return 0;
1565}
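
/*
 * Both D3 entry and exit use the same doorbell handshake: clear the
 * sx_complete flag, ring the firmware doorbell, sleep until the ISR
 * sets the flag (or two seconds pass), and clear the flag again so a
 * late interrupt cannot satisfy the *next* handshake.  A hedged sketch
 * of that ordering (the helper name is hypothetical):
 */
static int __maybe_unused iwl_example_sx_handshake(struct iwl_trans_pcie *trans_pcie,
                                                   u32 doorbell)
{
        trans_pcie->sx_complete = false;        /* arm */
        iwl_write_umac_prph(trans_pcie->trans, UREG_DOORBELL_TO_ISR6,
                            doorbell);
        if (!wait_event_timeout(trans_pcie->sx_waitq,
                                trans_pcie->sx_complete, 2 * HZ)) {
                trans_pcie->sx_complete = false;
                return -ETIMEDOUT;
        }
        trans_pcie->sx_complete = false;        /* disarm for next time */
        return 0;
}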
1566
1567static void
1568iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1569                            struct iwl_trans *trans,
1570                            const struct iwl_cfg_trans_params *cfg_trans)
1571{
1572        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1573        int max_irqs, num_irqs, i, ret;
1574        u16 pci_cmd;
1575        u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;
1576
1577        if (!cfg_trans->mq_rx_supported)
1578                goto enable_msi;
1579
1580        if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
1581                max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;
1582
1583        max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
1584        for (i = 0; i < max_irqs; i++)
1585                trans_pcie->msix_entries[i].entry = i;
1586
1587        num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
1588                                         MSIX_MIN_INTERRUPT_VECTORS,
1589                                         max_irqs);
1590        if (num_irqs < 0) {
1591                IWL_DEBUG_INFO(trans,
1592                               "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
1593                               num_irqs);
1594                goto enable_msi;
1595        }
1596        trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
1597
1598        IWL_DEBUG_INFO(trans,
1599                       "MSI-X enabled. %d interrupt vectors were allocated\n",
1600                       num_irqs);
1601
1602        /*
1603         * In case the OS provides fewer interrupts than requested, different
1604         * causes will share the same interrupt vector as follows:
1605         * One interrupt fewer: non-RX causes shared with FBQ.
1606         * Two interrupts fewer: non-RX causes shared with FBQ and RSS.
1607         * More than two fewer: we will use fewer RSS queues.
1608         */
1609        if (num_irqs <= max_irqs - 2) {
1610                trans_pcie->trans->num_rx_queues = num_irqs + 1;
1611                trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1612                        IWL_SHARED_IRQ_FIRST_RSS;
1613        } else if (num_irqs == max_irqs - 1) {
1614                trans_pcie->trans->num_rx_queues = num_irqs;
1615                trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1616        } else {
1617                trans_pcie->trans->num_rx_queues = num_irqs - 1;
1618        }
1619
1620        IWL_DEBUG_INFO(trans,
1621                       "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
1622                       trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask);
1623
1624        WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
1625
1626        trans_pcie->alloc_vecs = num_irqs;
1627        trans_pcie->msix_enabled = true;
1628        return;
1629
1630enable_msi:
1631        ret = pci_enable_msi(pdev);
1632        if (ret) {
1633                dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
1634                /* enable rfkill interrupt: hw bug w/a */
1635                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1636                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1637                        pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1638                        pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1639                }
1640        }
1641}
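
/*
 * The queue/vector accounting above boils down to a small pure
 * function of the granted vs. requested vector counts.  A hedged
 * sketch (illustrative only; the helper name is hypothetical):
 */
static int __maybe_unused iwl_example_rx_queues(int num_irqs, int max_irqs,
                                                u32 *shared_vec_mask)
{
        if (num_irqs <= max_irqs - 2) {
                /* two or more vectors short: share non-RX and first RSS */
                *shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
                                   IWL_SHARED_IRQ_FIRST_RSS;
                return num_irqs + 1;
        }
        if (num_irqs == max_irqs - 1) {
                /* one vector short: only non-RX causes share with FBQ */
                *shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
                return num_irqs;
        }
        /* full allocation: one vector is dedicated to non-RX causes */
        *shared_vec_mask = 0;
        return num_irqs - 1;
}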
1642
1643static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
1644{
1645        int iter_rx_q, i, ret, cpu, offset;
1646        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1647
1648        i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
1649        iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
1650        offset = 1 + i;
1651        for (; i < iter_rx_q ; i++) {
1652                /*
1653                 * Start from the CPU just before the slot we want, so
1654                 * cpumask_next() returns a CPU greater than i - offset.
1655                 */
1656                cpu = cpumask_next(i - offset, cpu_online_mask);
1657                cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
1658                ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
1659                                            &trans_pcie->affinity_mask[i]);
1660                if (ret)
1661                        IWL_ERR(trans_pcie->trans,
1662                                "Failed to set affinity mask for IRQ %d\n",
1663                                trans_pcie->msix_entries[i].vector);
1664        }
1665}
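
/*
 * Stripped of the cpumask plumbing, the loop above pins the n-th RX
 * vector to the n-th online CPU, starting from the first one.  The
 * index arithmetic alone (hedged sketch, hypothetical helper, and it
 * assumes a hole-free online mask):
 */
static int __maybe_unused iwl_example_vector_to_cpu(int vec, int first_rx_vec)
{
        return vec - first_rx_vec;      /* n-th RX vector -> n-th CPU */
}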
1666
1667static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1668                                      struct iwl_trans_pcie *trans_pcie)
1669{
1670        int i;
1671
1672        for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1673                int ret;
1674                struct msix_entry *msix_entry;
1675                const char *qname = queue_name(&pdev->dev, trans_pcie, i);
1676
1677                if (!qname)
1678                        return -ENOMEM;
1679
1680                msix_entry = &trans_pcie->msix_entries[i];
1681                ret = devm_request_threaded_irq(&pdev->dev,
1682                                                msix_entry->vector,
1683                                                iwl_pcie_msix_isr,
1684                                                (i == trans_pcie->def_irq) ?
1685                                                iwl_pcie_irq_msix_handler :
1686                                                iwl_pcie_irq_rx_msix_handler,
1687                                                IRQF_SHARED,
1688                                                qname,
1689                                                msix_entry);
1690                if (ret) {
1691                        IWL_ERR(trans_pcie->trans,
1692                                "Error allocating IRQ %d\n", i);
1693
1694                        return ret;
1695                }
1696        }
1697        iwl_pcie_irq_set_affinity(trans_pcie->trans);
1698
1699        return 0;
1700}
1701
1702static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
1703{
1704        u32 hpm, wprot;
1705
1706        switch (trans->trans_cfg->device_family) {
1707        case IWL_DEVICE_FAMILY_9000:
1708                wprot = PREG_PRPH_WPROT_9000;
1709                break;
1710        case IWL_DEVICE_FAMILY_22000:
1711                wprot = PREG_PRPH_WPROT_22000;
1712                break;
1713        default:
1714                return 0;
1715        }
1716
1717        hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
1718        if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
1719                u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
1720
1721                if (wprot_val & PREG_WFPM_ACCESS) {
1722                        IWL_ERR(trans,
1723                                "Error, can not clear persistence bit\n");
1724                        return -EPERM;
1725                }
1726                iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
1727                                            hpm & ~PERSISTENCE_BIT);
1728        }
1729
1730        return 0;
1731}
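
/*
 * Clearing the persistence bit is a guarded read-modify-write: give up
 * if the write-protect register says the area is locked, otherwise
 * write the value back with the bit cleared.  The core logic as a
 * hedged sketch (the helper name is hypothetical):
 */
static int __maybe_unused iwl_example_clear_guarded(u32 *reg, u32 bit,
                                                    bool write_protected)
{
        if (!(*reg & bit))
                return 0;       /* already clear, nothing to do */
        if (write_protected)
                return -EPERM;  /* locked: the clear would be ignored */
        *reg &= ~bit;
        return 0;
}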
1732
1733static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
1734{
1735        int ret;
1736
1737        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
1738        if (ret < 0)
1739                return ret;
1740
1741        iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1742                          HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1743        udelay(20);
1744        iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1745                          HPM_HIPM_GEN_CFG_CR_PG_EN |
1746                          HPM_HIPM_GEN_CFG_CR_SLP_EN);
1747        udelay(20);
1748        iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
1749                            HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1750
1751        iwl_trans_pcie_sw_reset(trans);
1752
1753        return 0;
1754}
1755
1756static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1757{
1758        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1759        int err;
1760
1761        lockdep_assert_held(&trans_pcie->mutex);
1762
1763        err = iwl_pcie_prepare_card_hw(trans);
1764        if (err) {
1765                IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1766                return err;
1767        }
1768
1769        err = iwl_trans_pcie_clear_persistence_bit(trans);
1770        if (err)
1771                return err;
1772
1773        iwl_trans_pcie_sw_reset(trans);
1774
1775        if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
1776            trans->trans_cfg->integrated) {
1777                err = iwl_pcie_gen2_force_power_gating(trans);
1778                if (err)
1779                        return err;
1780        }
1781
1782        err = iwl_pcie_apm_init(trans);
1783        if (err)
1784                return err;
1785
1786        iwl_pcie_init_msix(trans_pcie);
1787
1788        /* From now on, the op_mode will be kept updated about RF kill state */
1789        iwl_enable_rfkill_int(trans);
1790
1791        trans_pcie->opmode_down = false;
1792
1793        /* Set is_down to false here so that... */
1794        trans_pcie->is_down = false;
1795
1796        /* ...rfkill can call stop_device and set it back to true if needed */
1797        iwl_pcie_check_hw_rf_kill(trans);
1798
1799        return 0;
1800}
1801
1802static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1803{
1804        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1805        int ret;
1806
1807        mutex_lock(&trans_pcie->mutex);
1808        ret = _iwl_trans_pcie_start_hw(trans);
1809        mutex_unlock(&trans_pcie->mutex);
1810
1811        return ret;
1812}
1813
1814static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1815{
1816        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1817
1818        mutex_lock(&trans_pcie->mutex);
1819
1820        /* disable interrupts - don't enable HW RF kill interrupt */
1821        iwl_disable_interrupts(trans);
1822
1823        iwl_pcie_apm_stop(trans, true);
1824
1825        iwl_disable_interrupts(trans);
1826
1827        iwl_pcie_disable_ict(trans);
1828
1829        mutex_unlock(&trans_pcie->mutex);
1830
1831        iwl_pcie_synchronize_irqs(trans);
1832}
1833
1834static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1835{
1836        writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1837}
1838
1839static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1840{
1841        writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1842}
1843
1844static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1845{
1846        return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1847}
1848
1849static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
1850{
1851        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1852                return 0x00FFFFFF;
1853        else
1854                return 0x000FFFFF;
1855}
1856
1857static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1858{
1859        u32 mask = iwl_trans_pcie_prph_msk(trans);
1860
1861        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1862                               ((reg & mask) | (3 << 24)));
1863        return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1864}
1865
1866static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1867                                      u32 val)
1868{
1869        u32 mask = iwl_trans_pcie_prph_msk(trans);
1870
1871        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1872                               ((addr & mask) | (3 << 24)));
1873        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1874}
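
/*
 * Periphery (PRPH) registers are not memory mapped directly: the
 * target address goes into HBUS_TARG_PRPH_{R,W}ADDR, with the access
 * width encoded above the address bits (3 selects a full dword), and
 * the data then moves through the matching *DAT register.  The address
 * word is built like this (hedged sketch, hypothetical helper):
 */
static u32 __maybe_unused iwl_example_prph_addr(u32 reg, u32 mask)
{
        return (reg & mask) | (3 << 24);        /* dword-wide access */
}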
1875
1876static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1877                                     const struct iwl_trans_config *trans_cfg)
1878{
1879        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1880
1881        /* free all first - we might be reconfigured for a different size */
1882        iwl_pcie_free_rbs_pool(trans);
1883
1884        trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
1885        trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
1886        trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
1887        trans->txqs.page_offs = trans_cfg->cb_data_offs;
1888        trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
1889
1890        if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1891                trans_pcie->n_no_reclaim_cmds = 0;
1892        else
1893                trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1894        if (trans_pcie->n_no_reclaim_cmds)
1895                memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1896                       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1897
1898        trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
1899        trans_pcie->rx_page_order =
1900                iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
1901        trans_pcie->rx_buf_bytes =
1902                iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
1903        trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
1904        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1905                trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
1906
1907        trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
1908        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
1909
1910        trans->command_groups = trans_cfg->command_groups;
1911        trans->command_groups_size = trans_cfg->command_groups_size;
1912
1913        /* Initialize NAPI here - it should be before registering to mac80211
1914         * in the opmode but after the HW struct is allocated.
1915         * As this function may be called again in some corner cases don't
1916         * do anything if NAPI was already initialized.
1917         */
1918        if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
1919                init_dummy_netdev(&trans_pcie->napi_dev);
1920
1921        trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
1922}
1923
1924void iwl_trans_pcie_free(struct iwl_trans *trans)
1925{
1926        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1927        int i;
1928
1929        iwl_pcie_synchronize_irqs(trans);
1930
1931        if (trans->trans_cfg->gen2)
1932                iwl_txq_gen2_tx_free(trans);
1933        else
1934                iwl_pcie_tx_free(trans);
1935        iwl_pcie_rx_free(trans);
1936
1937        if (trans_pcie->rba.alloc_wq) {
1938                destroy_workqueue(trans_pcie->rba.alloc_wq);
1939                trans_pcie->rba.alloc_wq = NULL;
1940        }
1941
1942        if (trans_pcie->msix_enabled) {
1943                for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1944                        irq_set_affinity_hint(
1945                                trans_pcie->msix_entries[i].vector,
1946                                NULL);
1947                }
1948
1949                trans_pcie->msix_enabled = false;
1950        } else {
1951                iwl_pcie_free_ict(trans);
1952        }
1953
1954        iwl_pcie_free_fw_monitor(trans);
1955
1956        if (trans_pcie->pnvm_dram.size)
1957                dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
1958                                  trans_pcie->pnvm_dram.block,
1959                                  trans_pcie->pnvm_dram.physical);
1960
1961        if (trans_pcie->reduce_power_dram.size)
1962                dma_free_coherent(trans->dev,
1963                                  trans_pcie->reduce_power_dram.size,
1964                                  trans_pcie->reduce_power_dram.block,
1965                                  trans_pcie->reduce_power_dram.physical);
1966
1967        mutex_destroy(&trans_pcie->mutex);
1968        iwl_trans_free(trans);
1969}
1970
1971static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1972{
1973        if (state)
1974                set_bit(STATUS_TPOWER_PMI, &trans->status);
1975        else
1976                clear_bit(STATUS_TPOWER_PMI, &trans->status);
1977}
1978
1979struct iwl_trans_pcie_removal {
1980        struct pci_dev *pdev;
1981        struct work_struct work;
1982};
1983
1984static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
1985{
1986        struct iwl_trans_pcie_removal *removal =
1987                container_of(wk, struct iwl_trans_pcie_removal, work);
1988        struct pci_dev *pdev = removal->pdev;
1989        static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
1990
1991        dev_err(&pdev->dev, "Device gone - attempting removal\n");
1992        kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
1993        pci_lock_rescan_remove();
1994        pci_dev_put(pdev);
1995        pci_stop_and_remove_bus_device(pdev);
1996        pci_unlock_rescan_remove();
1997
1998        kfree(removal);
1999        module_put(THIS_MODULE);
2000}
2001
2002/*
2003 * This version doesn't disable BHs but rather assumes they're
2004 * already disabled.
2005 */
2006bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
2007{
2008        int ret;
2009        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2010        u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
2011        u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2012                   CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
2013        u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;
2014
2015        spin_lock(&trans_pcie->reg_lock);
2016
2017        if (trans_pcie->cmd_hold_nic_awake)
2018                goto out;
2019
2020        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
2021                write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
2022                mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
2023                poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
2024        }
2025
2026        /* this bit wakes up the NIC */
2027        __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
2028        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
2029                udelay(2);
2030
2031        /*
2032         * These bits say the device is running, and should keep running for
2033         * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
2034         * but they do not indicate that embedded SRAM is restored yet;
2035         * HW with volatile SRAM must save/restore contents to/from
2036         * host DRAM when sleeping/waking for power-saving.
2037         * Each direction takes approximately 1/4 millisecond; with this
2038         * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
2039         * series of register accesses are expected (e.g. reading Event Log),
2040         * to keep device from sleeping.
2041         *
2042         * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
2043         * SRAM is okay/restored.  We don't check that here because this call
2044         * is just for hardware register access; but GP1 MAC_SLEEP
2045         * check is a good idea before accessing the SRAM of HW with
2046         * volatile SRAM (e.g. reading Event Log).
2047         *
2048         * 5000 series and later (including 1000 series) have non-volatile SRAM,
2049         * and do not save/restore SRAM when power cycling.
2050         */
2051        ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000);
2052        if (unlikely(ret < 0)) {
2053                u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
2054
2055                WARN_ONCE(1,
2056                          "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
2057                          cntrl);
2058
2059                iwl_trans_pcie_dump_regs(trans);
2060
2061                if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
2062                        struct iwl_trans_pcie_removal *removal;
2063
2064                        if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2065                                goto err;
2066
2067                        IWL_ERR(trans, "Device gone - scheduling removal!\n");
2068
2069                        /*
2070                         * get a module reference to avoid doing this
2071                         * while unloading anyway and to avoid
2072                         * scheduling a work with code that's being
2073                         * removed.
2074                         */
2075                        if (!try_module_get(THIS_MODULE)) {
2076                                IWL_ERR(trans,
2077                                        "Module is being unloaded - abort\n");
2078                                goto err;
2079                        }
2080
2081                        removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
2082                        if (!removal) {
2083                                module_put(THIS_MODULE);
2084                                goto err;
2085                        }
2086                        /*
2087                         * we don't need to clear this flag, because
2088                         * the trans will be freed and reallocated.
2089                         */
2090                        set_bit(STATUS_TRANS_DEAD, &trans->status);
2091
2092                        removal->pdev = to_pci_dev(trans->dev);
2093                        INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
2094                        pci_dev_get(removal->pdev);
2095                        schedule_work(&removal->work);
2096                } else {
2097                        iwl_write32(trans, CSR_RESET,
2098                                    CSR_RESET_REG_FLAG_FORCE_NMI);
2099                }
2100
2101err:
2102                spin_unlock(&trans_pcie->reg_lock);
2103                return false;
2104        }
2105
2106out:
2107        /*
2108         * Fool sparse by faking that we release the lock - sparse will
2109         * track nic_access anyway.
2110         */
2111        __release(&trans_pcie->reg_lock);
2112        return true;
2113}
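
/*
 * The happy path of the wake handshake above, without the error
 * handling: assert the access-request bit, give the request time to
 * propagate, then poll CSR_GP_CNTRL until the clocks are ready.  A
 * hedged sketch for pre-Bz devices (the helper name is hypothetical):
 */
static bool __maybe_unused iwl_example_wake_nic(struct iwl_trans *trans)
{
        iwl_set_bit(trans, CSR_GP_CNTRL,
                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        udelay(2);

        return iwl_poll_bit(trans, CSR_GP_CNTRL,
                            CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
                            CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
                            CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP,
                            15000) >= 0;
}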
2114
2115static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
2116{
2117        bool ret;
2118
2119        local_bh_disable();
2120        ret = __iwl_trans_pcie_grab_nic_access(trans);
2121        if (ret) {
2122                /* keep BHs disabled until iwl_trans_pcie_release_nic_access */
2123                return ret;
2124        }
2125        local_bh_enable();
2126        return false;
2127}
2128
2129static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
2130{
2131        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2132
2133        lockdep_assert_held(&trans_pcie->reg_lock);
2134
2135        /*
2136         * Fool sparse by faking that we acquire the lock - sparse will
2137         * track nic_access anyway.
2138         */
2139        __acquire(&trans_pcie->reg_lock);
2140
2141        if (trans_pcie->cmd_hold_nic_awake)
2142                goto out;
2143
2144        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2145                                   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2146        /*
2147         * Above we read the CSR_GP_CNTRL register, which will flush
2148         * any previous writes, but we need the write that clears the
2149         * MAC_ACCESS_REQ bit to be performed before any other writes
2150         * scheduled on different CPUs (after we drop reg_lock).
2151         */
2152out:
2153        spin_unlock_bh(&trans_pcie->reg_lock);
2154}
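
/*
 * grab/release always bracket a burst of direct register access; the
 * canonical usage looks like this (hedged sketch, hypothetical helper):
 */
static u32 __maybe_unused iwl_example_read_locked(struct iwl_trans *trans,
                                                  u32 reg)
{
        u32 val = 0;

        if (iwl_trans_grab_nic_access(trans)) {
                val = iwl_read32(trans, reg);
                iwl_trans_release_nic_access(trans);
        }
        return val;
}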
2155
2156static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2157                                   void *buf, int dwords)
2158{
2159        int offs = 0;
2160        u32 *vals = buf;
2161
2162        while (offs < dwords) {
2163                /* limit the time we spin here under lock to 1/2s */
2164                unsigned long end = jiffies + HZ / 2;
2165                bool resched = false;
2166
2167                if (iwl_trans_grab_nic_access(trans)) {
2168                        iwl_write32(trans, HBUS_TARG_MEM_RADDR,
2169                                    addr + 4 * offs);
2170
2171                        while (offs < dwords) {
2172                                vals[offs] = iwl_read32(trans,
2173                                                        HBUS_TARG_MEM_RDAT);
2174                                offs++;
2175
2176                                if (time_after(jiffies, end)) {
2177                                        resched = true;
2178                                        break;
2179                                }
2180                        }
2181                        iwl_trans_release_nic_access(trans);
2182
2183                        if (resched)
2184                                cond_resched();
2185                } else {
2186                        return -EBUSY;
2187                }
2188        }
2189
2190        return 0;
2191}
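
/*
 * The structure above bounds how long the NIC is held awake: copy
 * dwords until the buffer is full or half a second has passed, then
 * release the NIC and reschedule before grabbing it again.  The
 * deadline pattern in isolation (hedged sketch, hypothetical callback):
 */
static void __maybe_unused iwl_example_bounded_fill(u32 *dst, int dwords,
                                                    u32 (*read_one)(void))
{
        int offs = 0;

        while (offs < dwords) {
                unsigned long end = jiffies + HZ / 2;

                while (offs < dwords && !time_after(jiffies, end))
                        dst[offs++] = read_one();

                cond_resched(); /* let other tasks run between bursts */
        }
}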
2192
2193static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
2194                                    const void *buf, int dwords)
2195{
2196        int offs, ret = 0;
2197        const u32 *vals = buf;
2198
2199        if (iwl_trans_grab_nic_access(trans)) {
2200                iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
2201                for (offs = 0; offs < dwords; offs++)
2202                        iwl_write32(trans, HBUS_TARG_MEM_WDAT,
2203                                    vals ? vals[offs] : 0);
2204                iwl_trans_release_nic_access(trans);
2205        } else {
2206                ret = -EBUSY;
2207        }
2208        return ret;
2209}
2210
2211static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
2212                                        u32 *val)
2213{
2214        return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
2215                                     ofs, val);
2216}
2217
2218static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
2219{
2220        int i;
2221
2222        for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
2223                struct iwl_txq *txq = trans->txqs.txq[i];
2224
2225                if (i == trans->txqs.cmd.q_id)
2226                        continue;
2227
2228                spin_lock_bh(&txq->lock);
2229
2230                if (!block && !(WARN_ON_ONCE(!txq->block))) {
2231                        txq->block--;
2232                        if (!txq->block) {
2233                                iwl_write32(trans, HBUS_TARG_WRPTR,
2234                                            txq->write_ptr | (i << 8));
2235                        }
2236                } else if (block) {
2237                        txq->block++;
2238                }
2239
2240                spin_unlock_bh(&txq->lock);
2241        }
2242}
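
/*
 * txq->block is a nesting counter rather than a boolean: every block
 * must be paired with an unblock, and the write pointer is handed back
 * to the hardware only when the count returns to zero.  Reduced to its
 * core (hedged sketch, hypothetical helper):
 */
static bool __maybe_unused iwl_example_unblock(int *block)
{
        if (WARN_ON_ONCE(*block <= 0))
                return false;

        return --(*block) == 0; /* true: restore the HW write pointer */
}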
2243
2244#define IWL_FLUSH_WAIT_MS       2000
2245
2246static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
2247                                       struct iwl_trans_rxq_dma_data *data)
2248{
2249        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2250
2251        if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2252                return -EINVAL;
2253
2254        data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2255        data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2256        data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2257        data->fr_bd_wid = 0;
2258
2259        return 0;
2260}
2261
2262static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
2263{
2264        struct iwl_txq *txq;
2265        unsigned long now = jiffies;
2266        bool overflow_tx;
2267        u8 wr_ptr;
2268
2269        /* Make sure the NIC is still alive in the bus */
2270        if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2271                return -ENODEV;
2272
2273        if (!test_bit(txq_idx, trans->txqs.queue_used))
2274                return -EINVAL;
2275
2276        IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
2277        txq = trans->txqs.txq[txq_idx];
2278
2279        spin_lock_bh(&txq->lock);
2280        overflow_tx = txq->overflow_tx ||
2281                      !skb_queue_empty(&txq->overflow_q);
2282        spin_unlock_bh(&txq->lock);
2283
2284        wr_ptr = READ_ONCE(txq->write_ptr);
2285
2286        while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
2287                overflow_tx) &&
2288               !time_after(jiffies,
2289                           now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
2290                u8 write_ptr = READ_ONCE(txq->write_ptr);
2291
2292                /*
2293                 * If write pointer moved during the wait, warn only
2294                 * if the TX came from the op mode. If the TX came from
2295                 * the trans layer (overflow TX), don't warn.
2296                 */
2297                if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
2298                              "WR pointer moved while flushing %d -> %d\n",
2299                              wr_ptr, write_ptr))
2300                        return -ETIMEDOUT;
2301                wr_ptr = write_ptr;
2302
2303                usleep_range(1000, 2000);
2304
2305                spin_lock_bh(&txq->lock);
2306                overflow_tx = txq->overflow_tx ||
2307                              !skb_queue_empty(&txq->overflow_q);
2308                spin_unlock_bh(&txq->lock);
2309        }
2310
2311        if (txq->read_ptr != txq->write_ptr) {
2312                IWL_ERR(trans,
2313                        "fail to flush all tx fifo queues Q %d\n", txq_idx);
2314                iwl_txq_log_scd_error(trans, txq);
2315                return -ETIMEDOUT;
2316        }
2317
2318        IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
2319
2320        return 0;
2321}
2322
2323static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
2324{
2325        int cnt;
2326        int ret = 0;
2327
2328        /* waiting for all the tx frames to complete might take a while */
2329        for (cnt = 0;
2330             cnt < trans->trans_cfg->base_params->num_of_queues;
2331             cnt++) {
2332
2333                if (cnt == trans->txqs.cmd.q_id)
2334                        continue;
2335                if (!test_bit(cnt, trans->txqs.queue_used))
2336                        continue;
2337                if (!(BIT(cnt) & txq_bm))
2338                        continue;
2339
2340                ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
2341                if (ret)
2342                        break;
2343        }
2344
2345        return ret;
2346}
2347
2348static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2349                                         u32 mask, u32 value)
2350{
2351        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2352
2353        spin_lock_bh(&trans_pcie->reg_lock);
2354        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
2355        spin_unlock_bh(&trans_pcie->reg_lock);
2356}
2357
2358static const char *get_csr_string(int cmd)
2359{
2360#define IWL_CMD(x) case x: return #x
2361        switch (cmd) {
2362        IWL_CMD(CSR_HW_IF_CONFIG_REG);
2363        IWL_CMD(CSR_INT_COALESCING);
2364        IWL_CMD(CSR_INT);
2365        IWL_CMD(CSR_INT_MASK);
2366        IWL_CMD(CSR_FH_INT_STATUS);
2367        IWL_CMD(CSR_GPIO_IN);
2368        IWL_CMD(CSR_RESET);
2369        IWL_CMD(CSR_GP_CNTRL);
2370        IWL_CMD(CSR_HW_REV);
2371        IWL_CMD(CSR_EEPROM_REG);
2372        IWL_CMD(CSR_EEPROM_GP);
2373        IWL_CMD(CSR_OTP_GP_REG);
2374        IWL_CMD(CSR_GIO_REG);
2375        IWL_CMD(CSR_GP_UCODE_REG);
2376        IWL_CMD(CSR_GP_DRIVER_REG);
2377        IWL_CMD(CSR_UCODE_DRV_GP1);
2378        IWL_CMD(CSR_UCODE_DRV_GP2);
2379        IWL_CMD(CSR_LED_REG);
2380        IWL_CMD(CSR_DRAM_INT_TBL_REG);
2381        IWL_CMD(CSR_GIO_CHICKEN_BITS);
2382        IWL_CMD(CSR_ANA_PLL_CFG);
2383        IWL_CMD(CSR_HW_REV_WA_REG);
2384        IWL_CMD(CSR_MONITOR_STATUS_REG);
2385        IWL_CMD(CSR_DBG_HPET_MEM_REG);
2386        default:
2387                return "UNKNOWN";
2388        }
2389#undef IWL_CMD
2390}
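
/*
 * get_csr_string() leans on a classic stringification idiom: a local
 * macro expands each register constant into both a case label and its
 * printable name via the preprocessor's # operator, so the two can
 * never get out of sync.  The idiom in miniature (hedged sketch):
 */
#define IWL_EXAMPLE_NAME(x) case x: return #x
static __maybe_unused const char *iwl_example_reg_name(u32 reg)
{
        switch (reg) {
        IWL_EXAMPLE_NAME(CSR_RESET);
        IWL_EXAMPLE_NAME(CSR_GP_CNTRL);
        default:
                return "UNKNOWN";
        }
}
#undef IWL_EXAMPLE_NAME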
2391
2392void iwl_pcie_dump_csr(struct iwl_trans *trans)
2393{
2394        int i;
2395        static const u32 csr_tbl[] = {
2396                CSR_HW_IF_CONFIG_REG,
2397                CSR_INT_COALESCING,
2398                CSR_INT,
2399                CSR_INT_MASK,
2400                CSR_FH_INT_STATUS,
2401                CSR_GPIO_IN,
2402                CSR_RESET,
2403                CSR_GP_CNTRL,
2404                CSR_HW_REV,
2405                CSR_EEPROM_REG,
2406                CSR_EEPROM_GP,
2407                CSR_OTP_GP_REG,
2408                CSR_GIO_REG,
2409                CSR_GP_UCODE_REG,
2410                CSR_GP_DRIVER_REG,
2411                CSR_UCODE_DRV_GP1,
2412                CSR_UCODE_DRV_GP2,
2413                CSR_LED_REG,
2414                CSR_DRAM_INT_TBL_REG,
2415                CSR_GIO_CHICKEN_BITS,
2416                CSR_ANA_PLL_CFG,
2417                CSR_MONITOR_STATUS_REG,
2418                CSR_HW_REV_WA_REG,
2419                CSR_DBG_HPET_MEM_REG
2420        };
2421        IWL_ERR(trans, "CSR values:\n");
2422        IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2423                "CSR_INT_PERIODIC_REG)\n");
2424        for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2425                IWL_ERR(trans, "  %25s: 0X%08x\n",
2426                        get_csr_string(csr_tbl[i]),
2427                        iwl_read32(trans, csr_tbl[i]));
2428        }
2429}
2430
2431#ifdef CONFIG_IWLWIFI_DEBUGFS
2432/* creation and removal of files */
2433#define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
2434        debugfs_create_file(#name, mode, parent, trans,                 \
2435                            &iwl_dbgfs_##name##_ops);                   \
2436} while (0)
2437
2438/* file operations */
2439#define DEBUGFS_READ_FILE_OPS(name)                                     \
2440static const struct file_operations iwl_dbgfs_##name##_ops = {          \
2441        .read = iwl_dbgfs_##name##_read,                                \
2442        .open = simple_open,                                            \
2443        .llseek = generic_file_llseek,                                  \
2444};
2445
2446#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
2447static const struct file_operations iwl_dbgfs_##name##_ops = {          \
2448        .write = iwl_dbgfs_##name##_write,                              \
2449        .open = simple_open,                                            \
2450        .llseek = generic_file_llseek,                                  \
2451};
2452
2453#define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
2454static const struct file_operations iwl_dbgfs_##name##_ops = {          \
2455        .write = iwl_dbgfs_##name##_write,                              \
2456        .read = iwl_dbgfs_##name##_read,                                \
2457        .open = simple_open,                                            \
2458        .llseek = generic_file_llseek,                                  \
2459};
2460
2461struct iwl_dbgfs_tx_queue_priv {
2462        struct iwl_trans *trans;
2463};
2464
2465struct iwl_dbgfs_tx_queue_state {
2466        loff_t pos;
2467};
2468
2469static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
2470{
2471        struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2472        struct iwl_dbgfs_tx_queue_state *state;
2473
2474        if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2475                return NULL;
2476
2477        state = kmalloc(sizeof(*state), GFP_KERNEL);
2478        if (!state)
2479                return NULL;
2480        state->pos = *pos;
2481        return state;
2482}
2483
2484static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
2485                                         void *v, loff_t *pos)
2486{
2487        struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2488        struct iwl_dbgfs_tx_queue_state *state = v;
2489
2490        *pos = ++state->pos;
2491
2492        if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2493                return NULL;
2494
2495        return state;
2496}
2497
2498static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
2499{
2500        kfree(v);
2501}
2502
2503static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
2504{
2505        struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2506        struct iwl_dbgfs_tx_queue_state *state = v;
2507        struct iwl_trans *trans = priv->trans;
2508        struct iwl_txq *txq = trans->txqs.txq[state->pos];
2509
2510        seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
2511                   (unsigned int)state->pos,
2512                   !!test_bit(state->pos, trans->txqs.queue_used),
2513                   !!test_bit(state->pos, trans->txqs.queue_stopped));
2514        if (txq)
2515                seq_printf(seq,
2516                           "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
2517                           txq->read_ptr, txq->write_ptr,
2518                           txq->need_update, txq->frozen,
2519                           txq->n_window, txq->ampdu);
2520        else
2521                seq_puts(seq, "(unallocated)");
2522
2523        if (state->pos == trans->txqs.cmd.q_id)
2524                seq_puts(seq, " (HCMD)");
2525        seq_puts(seq, "\n");
2526
2527        return 0;
2528}
2529
2530static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
2531        .start = iwl_dbgfs_tx_queue_seq_start,
2532        .next = iwl_dbgfs_tx_queue_seq_next,
2533        .stop = iwl_dbgfs_tx_queue_seq_stop,
2534        .show = iwl_dbgfs_tx_queue_seq_show,
2535};
2536
2537static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
2538{
2539        struct iwl_dbgfs_tx_queue_priv *priv;
2540
2541        priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
2542                                  sizeof(*priv));
2543
2544        if (!priv)
2545                return -ENOMEM;
2546
2547        priv->trans = inode->i_private;
2548        return 0;
2549}
2550
2551static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2552                                       char __user *user_buf,
2553                                       size_t count, loff_t *ppos)
2554{
2555        struct iwl_trans *trans = file->private_data;
2556        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2557        char *buf;
2558        int pos = 0, i, ret;
2559        size_t bufsz;
2560
2561        bufsz = sizeof(char) * 121 * trans->num_rx_queues;
2562
2563        if (!trans_pcie->rxq)
2564                return -EAGAIN;
2565
2566        buf = kzalloc(bufsz, GFP_KERNEL);
2567        if (!buf)
2568                return -ENOMEM;
2569
2570        for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
2571                struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2572
2573                pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2574                                 i);
2575                pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2576                                 rxq->read);
2577                pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2578                                 rxq->write);
2579                pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2580                                 rxq->write_actual);
2581                pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2582                                 rxq->need_update);
2583                pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2584                                 rxq->free_count);
2585                if (rxq->rb_stts) {
2586                        u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
2587                                                                     rxq));
2588                        pos += scnprintf(buf + pos, bufsz - pos,
2589                                         "\tclosed_rb_num: %u\n",
2590                                         r & 0x0FFF);
2591                } else {
2592                        pos += scnprintf(buf + pos, bufsz - pos,
2593                                         "\tclosed_rb_num: Not Allocated\n");
2594                }
2595        }
2596        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2597        kfree(buf);
2598
2599        return ret;
2600}
2601
2602static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2603                                        char __user *user_buf,
2604                                        size_t count, loff_t *ppos)
2605{
2606        struct iwl_trans *trans = file->private_data;
2607        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2608        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2609
2610        int pos = 0;
2611        char *buf;
2612        int bufsz = 24 * 64; /* 24 items * 64 chars per item */
2613        ssize_t ret;
2614
2615        buf = kzalloc(bufsz, GFP_KERNEL);
2616        if (!buf)
2617                return -ENOMEM;
2618
2619        pos += scnprintf(buf + pos, bufsz - pos,
2620                        "Interrupt Statistics Report:\n");
2621
2622        pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
2623                isr_stats->hw);
2624        pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
2625                isr_stats->sw);
2626        if (isr_stats->sw || isr_stats->hw) {
2627                pos += scnprintf(buf + pos, bufsz - pos,
2628                        "\tLast Restarting Code:  0x%X\n",
2629                        isr_stats->err_code);
2630        }
2631#ifdef CONFIG_IWLWIFI_DEBUG
2632        pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
2633                isr_stats->sch);
2634        pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
2635                isr_stats->alive);
2636#endif
2637        pos += scnprintf(buf + pos, bufsz - pos,
2638                "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
2639
2640        pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
2641                isr_stats->ctkill);
2642
2643        pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
2644                isr_stats->wakeup);
2645
2646        pos += scnprintf(buf + pos, bufsz - pos,
2647                "Rx command responses:\t\t %u\n", isr_stats->rx);
2648
2649        pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
2650                isr_stats->tx);
2651
2652        pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
2653                isr_stats->unhandled);
2654
2655        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2656        kfree(buf);
2657        return ret;
2658}
2659
2660static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2661                                         const char __user *user_buf,
2662                                         size_t count, loff_t *ppos)
2663{
2664        struct iwl_trans *trans = file->private_data;
2665        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2666        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2667        u32 reset_flag;
2668        int ret;
2669
2670        ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
2671        if (ret)
2672                return ret;
2673        if (reset_flag == 0)
2674                memset(isr_stats, 0, sizeof(*isr_stats));
2675
2676        return count;
2677}
2678
2679static ssize_t iwl_dbgfs_csr_write(struct file *file,
2680                                   const char __user *user_buf,
2681                                   size_t count, loff_t *ppos)
2682{
2683        struct iwl_trans *trans = file->private_data;
2684
2685        iwl_pcie_dump_csr(trans);
2686
2687        return count;
2688}
2689
2690static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2691                                     char __user *user_buf,
2692                                     size_t count, loff_t *ppos)
2693{
2694        struct iwl_trans *trans = file->private_data;
2695        char *buf = NULL;
2696        ssize_t ret;
2697
2698        ret = iwl_dump_fh(trans, &buf);
2699        if (ret < 0)
2700                return ret;
2701        if (!buf)
2702                return -EINVAL;
2703        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2704        kfree(buf);
2705        return ret;
2706}
2707
2708static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
2709                                     char __user *user_buf,
2710                                     size_t count, loff_t *ppos)
2711{
2712        struct iwl_trans *trans = file->private_data;
2713        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2714        char buf[100];
2715        int pos;
2716
2717        pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
2718                        trans_pcie->debug_rfkill,
2719                        !(iwl_read32(trans, CSR_GP_CNTRL) &
2720                                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
2721
2722        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2723}
2724
2725static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
2726                                      const char __user *user_buf,
2727                                      size_t count, loff_t *ppos)
2728{
2729        struct iwl_trans *trans = file->private_data;
2730        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2731        bool new_value;
2732        int ret;
2733
2734        ret = kstrtobool_from_user(user_buf, count, &new_value);
2735        if (ret)
2736                return ret;
2737        if (new_value == trans_pcie->debug_rfkill)
2738                return count;
2739        IWL_WARN(trans, "changing debug rfkill %d->%d\n",
2740                 trans_pcie->debug_rfkill, new_value);
2741        trans_pcie->debug_rfkill = new_value;
2742        iwl_pcie_handle_rfkill_irq(trans);
2743
2744        return count;
2745}
2746
2747static int iwl_dbgfs_monitor_data_open(struct inode *inode,
2748                                       struct file *file)
2749{
2750        struct iwl_trans *trans = inode->i_private;
2751        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2752
2753        if (!trans->dbg.dest_tlv ||
2754            trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
2755                IWL_ERR(trans, "Debug destination is not set to DRAM\n");
2756                return -ENOENT;
2757        }
2758
2759        if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
2760                return -EBUSY;
2761
2762        trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
2763        return simple_open(inode, file);
2764}
2765
2766static int iwl_dbgfs_monitor_data_release(struct inode *inode,
2767                                          struct file *file)
2768{
2769        struct iwl_trans_pcie *trans_pcie =
2770                IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
2771
2772        if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
2773                trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
2774        return 0;
2775}
2776
2777static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
2778                                  void *buf, ssize_t *size,
2779                                  ssize_t *bytes_copied)
2780{
2781        int buf_size_left = count - *bytes_copied;
2782
2783        buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2784        if (*size > buf_size_left)
2785                *size = buf_size_left;
2786
2787        *size -= copy_to_user(user_buf, buf, *size);
2788        *bytes_copied += *size;
2789
2790        if (buf_size_left == *size)
2791                return true;
2792        return false;
2793}
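
/*
 * Note the rounding above: the free room in the user buffer is rounded
 * down to a whole number of dwords before the copy, so a monitor dword
 * is never split across two reads.  The rounding step alone (hedged
 * sketch, hypothetical helper, assuming room is non-negative):
 */
static ssize_t __maybe_unused iwl_example_round_to_dword(ssize_t room)
{
        return room - (room % sizeof(u32));     /* e.g. 7 -> 4, 8 -> 8 */
}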
2794
2795static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
2796                                           char __user *user_buf,
2797                                           size_t count, loff_t *ppos)
2798{
2799        struct iwl_trans *trans = file->private_data;
2800        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2801        void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
2802        struct cont_rec *data = &trans_pcie->fw_mon_data;
2803        u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
2804        ssize_t size, bytes_copied = 0;
2805        bool b_full;
2806
2807        if (trans->dbg.dest_tlv) {
2808                write_ptr_addr =
2809                        le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
2810                wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
2811        } else {
2812                write_ptr_addr = MON_BUFF_WRPTR;
2813                wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
2814        }
2815
2816        if (unlikely(!trans->dbg.rec_on))
2817                return 0;
2818
2819        mutex_lock(&data->mutex);
2820        if (data->state ==
2821            IWL_FW_MON_DBGFS_STATE_DISABLED) {
2822                mutex_unlock(&data->mutex);
2823                return 0;
2824        }
2825
2826        /* write_ptr position in bytes rather than DWs */
2827        write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
2828        wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
2829
2830        if (data->prev_wrap_cnt == wrap_cnt) {
2831                size = write_ptr - data->prev_wr_ptr;
2832                curr_buf = cpu_addr + data->prev_wr_ptr;
2833                b_full = iwl_write_to_user_buf(user_buf, count,
2834                                               curr_buf, &size,
2835                                               &bytes_copied);
2836                data->prev_wr_ptr += size;
2837
2838        } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2839                   write_ptr < data->prev_wr_ptr) {
2840                size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
2841                curr_buf = cpu_addr + data->prev_wr_ptr;
2842                b_full = iwl_write_to_user_buf(user_buf, count,
2843                                               curr_buf, &size,
2844                                               &bytes_copied);
2845                data->prev_wr_ptr += size;
2846
2847                if (!b_full) {
2848                        size = write_ptr;
2849                        b_full = iwl_write_to_user_buf(user_buf, count,
2850                                                       cpu_addr, &size,
2851                                                       &bytes_copied);
2852                        data->prev_wr_ptr = size;
2853                        data->prev_wrap_cnt++;
2854                }
2855        } else {
2856                if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2857                    write_ptr > data->prev_wr_ptr)
2858                        IWL_WARN(trans,
2859                                 "write pointer passed previous write pointer, starting to copy from the beginning\n");
2860                else if (likely(data->prev_wrap_cnt != 0 ||
2861                                data->prev_wr_ptr != 0))
2862                        IWL_WARN(trans,
2863                                 "monitor data is out of sync, starting to copy from the beginning\n");
2864
2865                size = write_ptr;
2866                b_full = iwl_write_to_user_buf(user_buf, count,
2867                                               cpu_addr, &size,
2868                                               &bytes_copied);
2869                data->prev_wr_ptr = size;
2870                data->prev_wrap_cnt = wrap_cnt;
2871        }
2872
2873        mutex_unlock(&data->mutex);
2874
2875        return bytes_copied;
2876}
2877
2878static ssize_t iwl_dbgfs_rf_read(struct file *file,
2879                                 char __user *user_buf,
2880                                 size_t count, loff_t *ppos)
2881{
2882        struct iwl_trans *trans = file->private_data;
2883        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2884
2885        if (!trans_pcie->rf_name[0])
2886                return -ENODEV;
2887
2888        return simple_read_from_buffer(user_buf, count, ppos,
2889                                       trans_pcie->rf_name,
2890                                       strlen(trans_pcie->rf_name));
2891}
2892
2893DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2894DEBUGFS_READ_FILE_OPS(fh_reg);
2895DEBUGFS_READ_FILE_OPS(rx_queue);
2896DEBUGFS_WRITE_FILE_OPS(csr);
2897DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
2898DEBUGFS_READ_FILE_OPS(rf);
2899
2900static const struct file_operations iwl_dbgfs_tx_queue_ops = {
2901        .owner = THIS_MODULE,
2902        .open = iwl_dbgfs_tx_queue_open,
2903        .read = seq_read,
2904        .llseek = seq_lseek,
2905        .release = seq_release_private,
2906};
2907
2908static const struct file_operations iwl_dbgfs_monitor_data_ops = {
2909        .read = iwl_dbgfs_monitor_data_read,
2910        .open = iwl_dbgfs_monitor_data_open,
2911        .release = iwl_dbgfs_monitor_data_release,
2912};
2913
2914/* Create the debugfs files and directories */
2915void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
2916{
2917        struct dentry *dir = trans->dbgfs_dir;
2918
2919        DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
2920        DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
2921        DEBUGFS_ADD_FILE(interrupt, dir, 0600);
2922        DEBUGFS_ADD_FILE(csr, dir, 0200);
2923        DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
2924        DEBUGFS_ADD_FILE(rfkill, dir, 0600);
2925        DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
2926        DEBUGFS_ADD_FILE(rf, dir, 0400);
2927}
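    /*
     * Editor's note: with debugfs mounted, the files registered above can
     * be read directly from user space; the exact path depends on the
     * kernel and driver version.  A hypothetical example:
     *
     *   cat /sys/kernel/debug/iwlwifi/0000:00:14.3/trans/rf
     *   cat /sys/kernel/debug/iwlwifi/0000:00:14.3/trans/monitor_data > fw_mon.bin
     */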
2928
2929static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
2930{
2931        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2932        struct cont_rec *data = &trans_pcie->fw_mon_data;
2933
2934        mutex_lock(&data->mutex);
2935        data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
2936        mutex_unlock(&data->mutex);
2937}
2938#endif /* CONFIG_IWLWIFI_DEBUGFS */
2939
2940static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
2941{
2942        u32 cmdlen = 0;
2943        int i;
2944
2945        for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
2946                cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
2947
2948        return cmdlen;
2949}
2950
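    /*
     * Dump the receive buffers (RBs) that the device has closed but the
     * driver has not yet processed: walk the queue from rxq->read up to
     * the closed-RB index reported by the device (a 12-bit value, hence
     * the 0x0FFF mask), sync each page for CPU access and emit it as an
     * IWL_FW_ERROR_DUMP_RB chunk.  Returns the number of bytes added.
     */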
2951static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
2952                                   struct iwl_fw_error_dump_data **data,
2953                                   int allocated_rb_nums)
2954{
2955        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2956        int max_len = trans_pcie->rx_buf_bytes;
2957        /* Dumping RBs is supported only for pre-9000 devices (1 queue) */
2958        struct iwl_rxq *rxq = &trans_pcie->rxq[0];
2959        u32 i, r, j, rb_len = 0;
2960
2961        spin_lock(&rxq->lock);
2962
2963        r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
2964
2965        for (i = rxq->read, j = 0;
2966             i != r && j < allocated_rb_nums;
2967             i = (i + 1) & RX_QUEUE_MASK, j++) {
2968                struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
2969                struct iwl_fw_error_dump_rb *rb;
2970
2971                dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
2972                                        max_len, DMA_FROM_DEVICE);
2973
2974                rb_len += sizeof(**data) + sizeof(*rb) + max_len;
2975
2976                (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
2977                (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
2978                rb = (void *)(*data)->data;
2979                rb->index = cpu_to_le32(i);
2980                memcpy(rb->data, page_address(rxb->page), max_len);
2981
2982                *data = iwl_fw_error_next_data(*data);
2983        }
2984
2985        spin_unlock(&rxq->lock);
2986
2987        return rb_len;
2988}
2989#define IWL_CSR_TO_DUMP (0x250)
2990
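    /*
     * Snapshot the first IWL_CSR_TO_DUMP (0x250 = 592) bytes of CSR
     * space, i.e. 148 32-bit registers, into a single CSR dump chunk.
     */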
2991static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
2992                                   struct iwl_fw_error_dump_data **data)
2993{
2994        u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
2995        __le32 *val;
2996        int i;
2997
2998        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
2999        (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
3000        val = (void *)(*data)->data;
3001
3002        for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
3003                *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3004
3005        *data = iwl_fw_error_next_data(*data);
3006
3007        return csr_len;
3008}
3009
3010static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
3011                                       struct iwl_fw_error_dump_data **data)
3012{
3013        u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
3014        __le32 *val;
3015        int i;
3016
3017        if (!iwl_trans_grab_nic_access(trans))
3018                return 0;
3019
3020        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
3021        (*data)->len = cpu_to_le32(fh_regs_len);
3022        val = (void *)(*data)->data;
3023
3024        if (!trans->trans_cfg->gen2)
3025                for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
3026                     i += sizeof(u32))
3027                        *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3028        else
3029                for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
3030                     i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
3031                     i += sizeof(u32))
3032                        *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
3033                                                                      i));
3034
3035        iwl_trans_release_nic_access(trans);
3036
3037        *data = iwl_fw_error_next_data(*data);
3038
3039        return sizeof(**data) + fh_regs_len;
3040}
3041
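    /*
     * Read the MARBH monitor out through its PRPH read port.  Editor's
     * note: judging from the loop below, writing 1 to
     * MON_DMARB_RD_CTL_ADDR arms the read port and each subsequent read
     * of MON_DMARB_RD_DATA_ADDR appears to return the next DW of the
     * monitor buffer; writing 0 disarms it again.
     */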
3042static u32
3043iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
3044                                 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
3045                                 u32 monitor_len)
3046{
3047        u32 buf_size_in_dwords = (monitor_len >> 2);
3048        u32 *buffer = (u32 *)fw_mon_data->data;
3049        u32 i;
3050
3051        if (!iwl_trans_grab_nic_access(trans))
3052                return 0;
3053
3054        iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
3055        for (i = 0; i < buf_size_in_dwords; i++)
3056                buffer[i] = iwl_read_umac_prph_no_grab(trans,
3057                                                       MON_DMARB_RD_DATA_ADDR);
3058        iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
3059
3060        iwl_trans_release_nic_access(trans);
3061
3062        return monitor_len;
3063}
3064
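    /*
     * Record the monitor base/write-pointer/wrap-count registers in the
     * dump header.  The register addresses depend on the device: AX210+
     * uses the DBGC_CUR_DBGBUF_* registers (with a 64-bit base and a
     * byte-based write pointer that is converted to DWs below), older
     * devices take them from the debug destination TLV if present, and
     * fall back to the legacy MON_BUFF_* registers otherwise.
     */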
3065static void
3066iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3067                             struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3068{
3069        u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3070
3071        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3072                base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3073                base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3074                write_ptr = DBGC_CUR_DBGBUF_STATUS;
3075                wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3076        } else if (trans->dbg.dest_tlv) {
3077                write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3078                wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3079                base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3080        } else {
3081                base = MON_BUFF_BASE_ADDR;
3082                write_ptr = MON_BUFF_WRPTR;
3083                wrap_cnt = MON_BUFF_CYCLE_CNT;
3084        }
3085
3086        write_ptr_val = iwl_read_prph(trans, write_ptr);
3087        fw_mon_data->fw_mon_cycle_cnt =
3088                cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3089        fw_mon_data->fw_mon_base_ptr =
3090                cpu_to_le32(iwl_read_prph(trans, base));
3091        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3092                fw_mon_data->fw_mon_base_high_ptr =
3093                        cpu_to_le32(iwl_read_prph(trans, base_high));
3094                write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3095                /* convert the write pointer to DWs, to align with all HWs */
3096                write_ptr_val >>= 2;
3097        }
3098        fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3099}
3100
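    /*
     * Append the firmware monitor contents to the dump.  Depending on the
     * configured destination, the data is copied straight from the DRAM
     * buffer, read out of SMEM via iwl_trans_read_mem(), or drained
     * through the MARBH read port; if none of these apply, only the
     * pointer header is emitted with a zero-length payload.
     */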
3101static u32
3102iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3103                            struct iwl_fw_error_dump_data **data,
3104                            u32 monitor_len)
3105{
3106        struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3107        u32 len = 0;
3108
3109        if (trans->dbg.dest_tlv ||
3110            (fw_mon->size &&
3111             (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3112              trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3113                struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3114
3115                (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3116                fw_mon_data = (void *)(*data)->data;
3117
3118                iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3119
3120                len += sizeof(**data) + sizeof(*fw_mon_data);
3121                if (fw_mon->size) {
3122                        memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3123                        monitor_len = fw_mon->size;
3124                } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3125                        u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3126                        /*
3127                         * Update pointers to reflect actual values after
3128                         * shifting
3129                         */
3130                        if (trans->dbg.dest_tlv->version) {
3131                                base = (iwl_read_prph(trans, base) &
3132                                        IWL_LDBG_M2S_BUF_BA_MSK) <<
3133                                       trans->dbg.dest_tlv->base_shift;
3134                                base *= IWL_M2S_UNIT_SIZE;
3135                                base += trans->cfg->smem_offset;
3136                        } else {
3137                                base = iwl_read_prph(trans, base) <<
3138                                       trans->dbg.dest_tlv->base_shift;
3139                        }
3140
3141                        iwl_trans_read_mem(trans, base, fw_mon_data->data,
3142                                           monitor_len / sizeof(u32));
3143                } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3144                        monitor_len =
3145                                iwl_trans_pci_dump_marbh_monitor(trans,
3146                                                                 fw_mon_data,
3147                                                                 monitor_len);
3148                } else {
3149                        /* Didn't match anything - output no monitor data */
3150                        monitor_len = 0;
3151                }
3152
3153                len += monitor_len;
3154                (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3155        }
3156
3157        return len;
3158}
3159
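    /*
     * Compute how many bytes the firmware monitor will occupy in the dump
     * and add them (plus headers) to *len.  For a version 1 destination
     * TLV the base and size are decoded from a single config register in
     * IWL_M2S_UNIT_SIZE units; otherwise separate base/end registers are
     * read and shifted.  Editor's note, a hypothetical v1 example: with
     * base_shift == 0 and (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) == 0x10,
     * the base becomes 0x10 * IWL_M2S_UNIT_SIZE + smem_offset.
     */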
3160static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3161{
3162        if (trans->dbg.fw_mon.size) {
3163                *len += sizeof(struct iwl_fw_error_dump_data) +
3164                        sizeof(struct iwl_fw_error_dump_fw_mon) +
3165                        trans->dbg.fw_mon.size;
3166                return trans->dbg.fw_mon.size;
3167        } else if (trans->dbg.dest_tlv) {
3168                u32 base, end, cfg_reg, monitor_len;
3169
3170                if (trans->dbg.dest_tlv->version == 1) {
3171                        cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3172                        cfg_reg = iwl_read_prph(trans, cfg_reg);
3173                        base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3174                                trans->dbg.dest_tlv->base_shift;
3175                        base *= IWL_M2S_UNIT_SIZE;
3176                        base += trans->cfg->smem_offset;
3177
3178                        monitor_len =
3179                                (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3180                                trans->dbg.dest_tlv->end_shift;
3181                        monitor_len *= IWL_M2S_UNIT_SIZE;
3182                } else {
3183                        base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3184                        end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3185
3186                        base = iwl_read_prph(trans, base) <<
3187                               trans->dbg.dest_tlv->base_shift;
3188                        end = iwl_read_prph(trans, end) <<
3189                              trans->dbg.dest_tlv->end_shift;
3190
3191                        /* Make "end" point to the actual end */
3192                        if (trans->trans_cfg->device_family >=
3193                            IWL_DEVICE_FAMILY_8000 ||
3194                            trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3195                                end += (1 << trans->dbg.dest_tlv->end_shift);
3196                        monitor_len = end - base;
3197                }
3198                *len += sizeof(struct iwl_fw_error_dump_data) +
3199                        sizeof(struct iwl_fw_error_dump_fw_mon) +
3200                        monitor_len;
3201                return monitor_len;
3202        }
3203        return 0;
3204}
3205
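    /*
     * Build the transport-level error dump in two passes: first compute a
     * worst-case length for every section selected in dump_mask and
     * vzalloc() one buffer for all of it, then walk the sections again
     * and fill them in, with each helper advancing the data pointer via
     * iwl_fw_error_next_data().
     */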
3206static struct iwl_trans_dump_data
3207*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
3208                          u32 dump_mask)
3209{
3210        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3211        struct iwl_fw_error_dump_data *data;
3212        struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
3213        struct iwl_fw_error_dump_txcmd *txcmd;
3214        struct iwl_trans_dump_data *dump_data;
3215        u32 len, num_rbs = 0, monitor_len = 0;
3216        int i, ptr;
3217        bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3218                        !trans->trans_cfg->mq_rx_supported &&
3219                        dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3220
3221        if (!dump_mask)
3222                return NULL;
3223
3224        /* transport dump header */
3225        len = sizeof(*dump_data);
3226
3227        /* host commands */
3228        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
3229                len += sizeof(*data) +
3230                        cmdq->n_window * (sizeof(*txcmd) +
3231                                          TFD_MAX_PAYLOAD_SIZE);
3232
3233        /* FW monitor */
3234        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3235                monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3236
3237        /* CSR registers */
3238        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3239                len += sizeof(*data) + IWL_CSR_TO_DUMP;
3240
3241        /* FH registers */
3242        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3243                if (trans->trans_cfg->gen2)
3244                        len += sizeof(*data) +
3245                               (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3246                                iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3247                else
3248                        len += sizeof(*data) +
3249                               (FH_MEM_UPPER_BOUND -
3250                                FH_MEM_LOWER_BOUND);
3251        }
3252
3253        if (dump_rbs) {
3254                /* Dumping RBs is supported only for pre-9000 devices (1 queue) */
3255                struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3256                /* RBs */
3257                num_rbs =
3258                        le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
3259                        & 0x0FFF;
3260                num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3261                len += num_rbs * (sizeof(*data) +
3262                                  sizeof(struct iwl_fw_error_dump_rb) +
3263                                  (PAGE_SIZE << trans_pcie->rx_page_order));
3264        }
3265
3266        /* Paged memory for gen2 HW */
3267        if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3268                for (i = 0; i < trans->init_dram.paging_cnt; i++)
3269                        len += sizeof(*data) +
3270                               sizeof(struct iwl_fw_error_dump_paging) +
3271                               trans->init_dram.paging[i].size;
3272
3273        dump_data = vzalloc(len);
3274        if (!dump_data)
3275                return NULL;
3276
3277        len = 0;
3278        data = (void *)dump_data->data;
3279
3280        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
3281                u16 tfd_size = trans->txqs.tfd.size;
3282
3283                data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3284                txcmd = (void *)data->data;
3285                spin_lock_bh(&cmdq->lock);
3286                ptr = cmdq->write_ptr;
3287                for (i = 0; i < cmdq->n_window; i++) {
3288                        u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
3289                        u8 tfdidx;
3290                        u32 caplen, cmdlen;
3291
3292                        if (trans->trans_cfg->use_tfh)
3293                                tfdidx = idx;
3294                        else
3295                                tfdidx = ptr;
3296
3297                        cmdlen = iwl_trans_pcie_get_cmdlen(trans,
3298                                                           (u8 *)cmdq->tfds +
3299                                                           tfd_size * tfdidx);
3300                        caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
3301
3302                        if (cmdlen) {
3303                                len += sizeof(*txcmd) + caplen;
3304                                txcmd->cmdlen = cpu_to_le32(cmdlen);
3305                                txcmd->caplen = cpu_to_le32(caplen);
3306                                memcpy(txcmd->data, cmdq->entries[idx].cmd,
3307                                       caplen);
3308                                txcmd = (void *)((u8 *)txcmd->data + caplen);
3309                        }
3310
3311                        ptr = iwl_txq_dec_wrap(trans, ptr);
3312                }
3313                spin_unlock_bh(&cmdq->lock);
3314
3315                data->len = cpu_to_le32(len);
3316                len += sizeof(*data);
3317                data = iwl_fw_error_next_data(data);
3318        }
3319
3320        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3321                len += iwl_trans_pcie_dump_csr(trans, &data);
3322        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
3323                len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3324        if (dump_rbs)
3325                len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
3326
3327        /* Paged memory for gen2 HW */
3328        if (trans->trans_cfg->gen2 &&
3329            dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
3330                for (i = 0; i < trans->init_dram.paging_cnt; i++) {
3331                        struct iwl_fw_error_dump_paging *paging;
3332                        u32 page_len = trans->init_dram.paging[i].size;
3333
3334                        data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
3335                        data->len = cpu_to_le32(sizeof(*paging) + page_len);
3336                        paging = (void *)data->data;
3337                        paging->index = cpu_to_le32(i);
3338                        memcpy(paging->data,
3339                               trans->init_dram.paging[i].block, page_len);
3340                        data = iwl_fw_error_next_data(data);
3341
3342                        len += sizeof(*data) + sizeof(*paging) + page_len;
3343                }
3344        }
3345        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3346                len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
3347
3348        dump_data->len = len;
3349
3350        return dump_data;
3351}
3352
3353static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
3354{
3355        if (enable)
3356                iwl_enable_interrupts(trans);
3357        else
3358                iwl_disable_interrupts(trans);
3359}
3360
3361static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
3362{
3363        u32 inta_addr, sw_err_bit;
3364        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3365
3366        if (trans_pcie->msix_enabled) {
3367                inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
3368                sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
3369        } else {
3370                inta_addr = CSR_INT;
3371                sw_err_bit = CSR_INT_BIT_SW_ERR;
3372        }
3373
3374        iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
3375}
3376
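    /*
     * Ops shared verbatim between the gen1 and gen2 transports; the two
     * iwl_trans_ops instances below add only the start/stop, command and
     * TX/RX queue handling, plus (for gen2) the PNVM and reduce-power
     * hooks, that actually differ.
     */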
3377#define IWL_TRANS_COMMON_OPS                                            \
3378        .op_mode_leave = iwl_trans_pcie_op_mode_leave,                  \
3379        .write8 = iwl_trans_pcie_write8,                                \
3380        .write32 = iwl_trans_pcie_write32,                              \
3381        .read32 = iwl_trans_pcie_read32,                                \
3382        .read_prph = iwl_trans_pcie_read_prph,                          \
3383        .write_prph = iwl_trans_pcie_write_prph,                        \
3384        .read_mem = iwl_trans_pcie_read_mem,                            \
3385        .write_mem = iwl_trans_pcie_write_mem,                          \
3386        .read_config32 = iwl_trans_pcie_read_config32,                  \
3387        .configure = iwl_trans_pcie_configure,                          \
3388        .set_pmi = iwl_trans_pcie_set_pmi,                              \
3389        .sw_reset = iwl_trans_pcie_sw_reset,                            \
3390        .grab_nic_access = iwl_trans_pcie_grab_nic_access,              \
3391        .release_nic_access = iwl_trans_pcie_release_nic_access,        \
3392        .set_bits_mask = iwl_trans_pcie_set_bits_mask,                  \
3393        .dump_data = iwl_trans_pcie_dump_data,                          \
3394        .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
3395        .d3_resume = iwl_trans_pcie_d3_resume,                          \
3396        .interrupts = iwl_trans_pci_interrupts,                         \
3397        .sync_nmi = iwl_trans_pcie_sync_nmi                             \
3398
3399static const struct iwl_trans_ops trans_ops_pcie = {
3400        IWL_TRANS_COMMON_OPS,
3401        .start_hw = iwl_trans_pcie_start_hw,
3402        .fw_alive = iwl_trans_pcie_fw_alive,
3403        .start_fw = iwl_trans_pcie_start_fw,
3404        .stop_device = iwl_trans_pcie_stop_device,
3405
3406        .send_cmd = iwl_pcie_enqueue_hcmd,
3407
3408        .tx = iwl_trans_pcie_tx,
3409        .reclaim = iwl_txq_reclaim,
3410
3411        .txq_disable = iwl_trans_pcie_txq_disable,
3412        .txq_enable = iwl_trans_pcie_txq_enable,
3413
3414        .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
3415
3416        .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
3417
3418        .freeze_txq_timer = iwl_trans_txq_freeze_timer,
3419        .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
3420#ifdef CONFIG_IWLWIFI_DEBUGFS
3421        .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3422#endif
3423};
3424
3425static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
3426        IWL_TRANS_COMMON_OPS,
3427        .start_hw = iwl_trans_pcie_start_hw,
3428        .fw_alive = iwl_trans_pcie_gen2_fw_alive,
3429        .start_fw = iwl_trans_pcie_gen2_start_fw,
3430        .stop_device = iwl_trans_pcie_gen2_stop_device,
3431
3432        .send_cmd = iwl_pcie_gen2_enqueue_hcmd,
3433
3434        .tx = iwl_txq_gen2_tx,
3435        .reclaim = iwl_txq_reclaim,
3436
3437        .set_q_ptrs = iwl_txq_set_q_ptrs,
3438
3439        .txq_alloc = iwl_txq_dyn_alloc,
3440        .txq_free = iwl_txq_dyn_free,
3441        .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
3442        .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
3443        .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
3444        .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
3445#ifdef CONFIG_IWLWIFI_DEBUGFS
3446        .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3447#endif
3448};
3449
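    /*
     * Allocate and wire up the PCIe transport: pick the gen1 or gen2 ops
     * table, enable the PCI device, set the DMA mask (falling back to
     * 32 bit), map BAR0, sanity-check CSR_HW_REV, and register either the
     * MSI-X handlers or the shared INTA interrupt with its ICT table.
     */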
3450struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3451                               const struct pci_device_id *ent,
3452                               const struct iwl_cfg_trans_params *cfg_trans)
3453{
3454        struct iwl_trans_pcie *trans_pcie;
3455        struct iwl_trans *trans;
3456        int ret, addr_size;
3457        const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
3458        void __iomem * const *table;
3459
3460        if (!cfg_trans->gen2)
3461                ops = &trans_ops_pcie;
3462
3463        ret = pcim_enable_device(pdev);
3464        if (ret)
3465                return ERR_PTR(ret);
3466
3467        trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
3468                                cfg_trans);
3469        if (!trans)
3470                return ERR_PTR(-ENOMEM);
3471
3472        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3473
3474        trans_pcie->trans = trans;
3475        trans_pcie->opmode_down = true;
3476        spin_lock_init(&trans_pcie->irq_lock);
3477        spin_lock_init(&trans_pcie->reg_lock);
3478        spin_lock_init(&trans_pcie->alloc_page_lock);
3479        mutex_init(&trans_pcie->mutex);
3480        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3481        init_waitqueue_head(&trans_pcie->fw_reset_waitq);
3482
3483        trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3484                                                   WQ_HIGHPRI | WQ_UNBOUND, 1);
3485        if (!trans_pcie->rba.alloc_wq) {
3486                ret = -ENOMEM;
3487                goto out_free_trans;
3488        }
3489        INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3490
3491        trans_pcie->debug_rfkill = -1;
3492
3493        if (!cfg_trans->base_params->pcie_l1_allowed) {
3494                /*
3495                 * Workaround - seems to solve weird behavior. We need to
3496                 * remove this if we ever want the link to enter L1; keeping
3497                 * it out of L1 all the time wastes a lot of power.
3498                 */
3499                pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
3500                                       PCIE_LINK_STATE_L1 |
3501                                       PCIE_LINK_STATE_CLKPM);
3502        }
3503
3504        trans_pcie->def_rx_queue = 0;
3505
3506        pci_set_master(pdev);
3507
3508        addr_size = trans->txqs.tfd.addr_size;
3509        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
3510        if (ret) {
3511                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3512                /* both attempts failed: */
3513                if (ret) {
3514                        dev_err(&pdev->dev, "No suitable DMA available\n");
3515                        goto out_no_pci;
3516                }
3517        }
3518
3519        ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
3520        if (ret) {
3521                dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
3522                goto out_no_pci;
3523        }
3524
3525        table = pcim_iomap_table(pdev);
3526        if (!table) {
3527                dev_err(&pdev->dev, "pcim_iomap_table failed\n");
3528                ret = -ENOMEM;
3529                goto out_no_pci;
3530        }
3531
3532        trans_pcie->hw_base = table[0];
3533        if (!trans_pcie->hw_base) {
3534                dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
3535                ret = -ENODEV;
3536                goto out_no_pci;
3537        }
3538
3539        /* We disable the RETRY_TIMEOUT register (0x41) to keep
3540         * PCI Tx retries from interfering with C3 CPU state */
3541        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3542
3543        trans_pcie->pci_dev = pdev;
3544        iwl_disable_interrupts(trans);
3545
3546        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
3547        if (trans->hw_rev == 0xffffffff) {
3548                dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
3549                ret = -EIO;
3550                goto out_no_pci;
3551        }
3552
3553        /*
3554         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
3555         * changed, and now the revision step also includes bits 0-1 (no more
3556         * "dash" value). To keep hw_rev backwards compatible, we store it
3557         * in the old format.
3558         */
3559        if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
3560                trans->hw_rev = (trans->hw_rev & 0xfff0) |
3561                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
3562
3563        IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
3564
3565        iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
3566        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
3567        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
3568                 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
3569
3570        init_waitqueue_head(&trans_pcie->sx_waitq);
3571
3573        if (trans_pcie->msix_enabled) {
3574                ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
3575                if (ret)
3576                        goto out_no_pci;
3577        } else {
3578                ret = iwl_pcie_alloc_ict(trans);
3579                if (ret)
3580                        goto out_no_pci;
3581
3582                ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
3583                                                iwl_pcie_isr,
3584                                                iwl_pcie_irq_handler,
3585                                                IRQF_SHARED, DRV_NAME, trans);
3586                if (ret) {
3587                        IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
3588                        goto out_free_ict;
3589                }
3590        }
3591
3592#ifdef CONFIG_IWLWIFI_DEBUGFS
3593        trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3594        mutex_init(&trans_pcie->fw_mon_data.mutex);
3595#endif
3596
3597        iwl_dbg_tlv_init(trans);
3598
3599        return trans;
3600
3601out_free_ict:
3602        iwl_pcie_free_ict(trans);
3603out_no_pci:
3604        destroy_workqueue(trans_pcie->rba.alloc_wq);
3605out_free_trans:
3606        iwl_trans_free(trans);
3607        return ERR_PTR(ret);
3608}
3609