linux/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/wait.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "internal.h"
#include "iwl-fh.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START       0x40000
#define IWL_FW_MEM_EXTENDED_END         0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE   64
#define PREFIX_LEN      32
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct pci_dev *pdev = trans_pcie->pci_dev;
        u32 i, pos, alloc_size, *ptr, *buf;
        char *prefix;

        if (trans_pcie->pcie_dbg_dumped_once)
                return;

        /* Should be a multiple of 4 */
        BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
        /* Alloc a max size buffer */
        if (PCI_ERR_ROOT_ERR_SRC + 4 > PCI_DUMP_SIZE)
                alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
        else
                alloc_size = PCI_DUMP_SIZE + PREFIX_LEN;
        buf = kmalloc(alloc_size, GFP_ATOMIC);
        if (!buf)
                return;
        prefix = (char *)buf + alloc_size - PREFIX_LEN;

        IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

        /* Print wifi device registers */
        sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
        IWL_ERR(trans, "iwlwifi device config registers:\n");
        for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
                if (pci_read_config_dword(pdev, i, ptr))
                        goto err_read;
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

        IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
        for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
                *ptr = iwl_read32(trans, i);
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        if (pos) {
                IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
                for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
                        if (pci_read_config_dword(pdev, pos + i, ptr))
                                goto err_read;
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
                               32, 4, buf, i, 0);
        }

        /* Print parent device registers next */
        if (!pdev->bus->self)
                goto out;

        pdev = pdev->bus->self;
        sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

        IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
                pci_name(pdev));
        for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
                if (pci_read_config_dword(pdev, i, ptr))
                        goto err_read;
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

        /* Print root port AER registers */
        pos = 0;
        pdev = pcie_find_root_port(pdev);
        if (pdev)
                pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        if (pos) {
                IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
                        pci_name(pdev));
                sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
                for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
                        if (pci_read_config_dword(pdev, pos + i, ptr))
                                goto err_read;
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
                               4, buf, i, 0);
        }
        goto out;

err_read:
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
        IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
        trans_pcie->pcie_dbg_dumped_once = 1;
        kfree(buf);
}

static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
        /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
        iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
                    BIT(trans->cfg->csr->flag_sw_reset));
        usleep_range(5000, 6000);
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
        int i;

        /*
         * Free in reverse order: the original forward loop decremented
         * trans->num_blocks while also using it as the loop bound, which
         * skips (and leaks) blocks whenever more than one was allocated.
         */
        for (i = trans->num_blocks - 1; i >= 0; i--) {
                dma_free_coherent(trans->dev, trans->fw_mon[i].size,
                                  trans->fw_mon[i].block,
                                  trans->fw_mon[i].physical);
                trans->fw_mon[i].block = NULL;
                trans->fw_mon[i].physical = 0;
                trans->fw_mon[i].size = 0;
                trans->num_blocks--;
        }
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
                                            u8 max_power, u8 min_power)
{
        void *cpu_addr = NULL;
        dma_addr_t phys = 0;
        u32 size = 0;
        u8 power;

        for (power = max_power; power >= min_power; power--) {
                size = BIT(power);
                cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
                                              GFP_KERNEL | __GFP_NOWARN |
                                              __GFP_ZERO | __GFP_COMP);
                if (!cpu_addr)
                        continue;

                IWL_INFO(trans,
                         "Allocated 0x%08x bytes for firmware monitor.\n",
                         size);
                break;
        }

        if (WARN_ON_ONCE(!cpu_addr))
                return;

        if (power != max_power)
                IWL_ERR(trans,
                        "Sorry - debug buffer is only %luK while you requested %luK\n",
                        (unsigned long)BIT(power - 10),
                        (unsigned long)BIT(max_power - 10));

        trans->fw_mon[trans->num_blocks].block = cpu_addr;
        trans->fw_mon[trans->num_blocks].physical = phys;
        trans->fw_mon[trans->num_blocks].size = size;
        trans->num_blocks++;
}
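
/*
 * Editor's note (illustrative, not driver code): with max_power = 26
 * and min_power = 11, the loop above asks dma_alloc_coherent() for
 * progressively smaller power-of-two buffers until one succeeds:
 *
 *   BIT(26) = 64 MiB -> BIT(25) = 32 MiB -> ... -> BIT(11) = 2 KiB
 *
 * so a machine that cannot back 64 MiB of contiguous coherent memory
 * still gets the largest monitor buffer it can actually provide.
 */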

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
        if (!max_power) {
                /* default max_power is maximum */
                max_power = 26;
        } else {
                max_power += 11;
        }

        if (WARN(max_power > 26,
                 "External buffer size for monitor is too big %d, check the FW TLV\n",
                 max_power))
                return;

        /*
         * This function allocates the default fw monitor.
         * The optional additional ones will be allocated at runtime.
         */
        if (trans->num_blocks)
                return;

        iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
                    ((reg & 0x0000ffff) | (2 << 28)));
        return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
                    ((reg & 0x0000ffff) | (3 << 28)));
}
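
/*
 * Editor's note on the control word used by the two helpers above
 * (an illustrative reading, not new driver code): the low 16 bits
 * carry the shared-memory register address and bits 31:28 carry the
 * opcode, 2 for read and 3 for write. E.g. reading SHR register
 * 0x1c8 issues:
 *
 *   iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
 *               (0x1c8 & 0x0000ffff) | (2 << 28));  // = 0x200001c8
 *
 * and the result is then fetched from HEEP_CTRL_WRD_PCIEX_DATA_REG.
 */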

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
        if (trans->cfg->apmg_not_supported)
                return;

        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
        else
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT   0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;
        u16 cap;

        /*
         * HW bug W/A for instability in PCIe bus L0S->L1 transition.
         * Check if BIOS (or OS) enabled L1-ASPM on this device.
         * If so (likely), disable L0S, so device moves directly L0->L1;
         *    costs negligible amount of power savings.
         * If not (unlikely), enable L0S, so there is at least some
         *    power savings, even without L1.
         */
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
        if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
                iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
        else
                iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
        trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
        IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
                        (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
                        trans->ltr_enabled ? "En" : "Dis");
}
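
/*
 * Editor's sketch of the decision above (illustrative only, based on
 * the in-function comment): when the BIOS/OS left L1-ASPM on, the
 * CSR_GIO_REG_VAL_L0S_ENABLED bit is set so the device skips L0S and
 * moves L0 -> L1 directly; when L1 is off, the bit is cleared so L0S
 * can still provide some savings. trans->pm_support ends up true only
 * when L0S was not enabled by the platform.
 */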

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
        int ret;

        IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

        /*
         * Use "set_bit" below rather than "write", to preserve any hardware
         * bits already set by default after reset.
         */

        /* Disable L0S exit timer (platform NMI W/A) */
        if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
                iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                            CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

        /*
         * Disable L0s without affecting L1;
         *  don't wait for ICH L0s (ICH bug W/A)
         */
        iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

        /* Set FH wait threshold to maximum (HW error during stress W/A) */
        iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

        /*
         * Enable HAP INTA (interrupt from management bus) to
         * wake device's PCI Express link L1a -> L0s
         */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

        iwl_pcie_apm_config(trans);

        /* Configure analog phase-lock-loop before activating to D0A */
        if (trans->cfg->base_params->pll_cfg)
                iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

        ret = iwl_finish_nic_init(trans);
        if (ret)
                return ret;

        if (trans->cfg->host_interrupt_operation_mode) {
                /*
                 * This is a bit of an abuse - this is needed for 7260 / 3160
                 * only, so we check host_interrupt_operation_mode even though
                 * the oscillator workaround below is not really related to it.
                 *
                 * Enable the oscillator to count wake up time for L1 exit. This
                 * consumes slightly more power (100uA) - but allows to be sure
                 * that we wake up from L1 on time.
                 *
                 * This looks weird: read twice the same register, discard the
                 * value, set a bit, and yet again, read that same register
                 * just to discard the value. But that's the way the hardware
                 * seems to like it.
                 */
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
                iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
        }

        /*
         * Enable DMA clock and wait for it to stabilize.
         *
         * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
         * bits do not disable clocks.  This preserves any hardware
         * bits already set by default in "CLK_CTRL_REG" after reset.
         */
        if (!trans->cfg->apmg_not_supported) {
                iwl_write_prph(trans, APMG_CLK_EN_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(20);

                /* Disable L1-Active */
                iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

                /* Clear the interrupt in APMG if the NIC is in RFKILL */
                iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
                               APMG_RTC_INT_STT_RFKILL);
        }

        set_bit(STATUS_DEVICE_ENABLED, &trans->status);

        return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
        int ret;
        u32 apmg_gp1_reg;
        u32 apmg_xtal_cfg_reg;
        u32 dl_cfg_reg;

        /* Force XTAL ON */
        __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

        iwl_trans_pcie_sw_reset(trans);

        ret = iwl_finish_nic_init(trans);
        if (WARN_ON(ret)) {
                /* Release XTAL ON request */
                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                           CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
                return;
        }

        /*
         * Clear "disable persistence" to avoid LP XTAL resetting when
         * SHRD_HW_RST is applied in S3.
         */
        iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
                            APMG_PCIDEV_STT_VAL_PERSIST_DIS);

        /*
         * Force APMG XTAL to be active to prevent its disabling by HW
         * caused by APMG idle state.
         */
        apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
                                                    SHR_APMG_XTAL_CFG_REG);
        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
                                 apmg_xtal_cfg_reg |
                                 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

        iwl_trans_pcie_sw_reset(trans);

        /* Enable LP XTAL by indirect access through CSR */
        apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
        iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
                                 SHR_APMG_GP1_WF_XTAL_LP_EN |
                                 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

        /* Clear delay line clock power up */
        dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
        iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
                                 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

        /*
         * Enable persistence mode to avoid LP XTAL resetting when
         * SHRD_HW_RST is applied in S3.
         */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

        /*
         * Clear "initialization complete" bit to move adapter from
         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
         */
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      BIT(trans->cfg->csr->flag_init_done));

        /* Activate the XTAL resources monitor */
        __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
                                 CSR_MONITOR_XTAL_RESOURCES);

        /* Release XTAL ON request */
        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
        udelay(10);

        /* Release APMG XTAL */
        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
                                 apmg_xtal_cfg_reg &
                                 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
        int ret;

        /* stop device's busmaster DMA activity */
        iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
                    BIT(trans->cfg->csr->flag_stop_master));

        ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset,
                           BIT(trans->cfg->csr->flag_master_dis),
                           BIT(trans->cfg->csr->flag_master_dis), 100);
        if (ret < 0)
                IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

        IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
        IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

        if (op_mode_leave) {
                if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                        iwl_pcie_apm_init(trans);

                /* inform ME that we are leaving */
                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
                        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                          APMG_PCIDEV_STT_VAL_WAKE_ME);
                else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
                        iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                                    CSR_RESET_LINK_PWR_MGMT_DISABLED);
                        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                                    CSR_HW_IF_CONFIG_REG_PREPARE |
                                    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
                        mdelay(1);
                        iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                                      CSR_RESET_LINK_PWR_MGMT_DISABLED);
                }
                mdelay(5);
        }

        clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

        /* Stop device's DMA activity */
        iwl_pcie_apm_stop_master(trans);

        if (trans->cfg->lp_xtal_workaround) {
                iwl_pcie_apm_lp_xtal_enable(trans);
                return;
        }

        iwl_trans_pcie_sw_reset(trans);

        /*
         * Clear "initialization complete" bit to move adapter from
         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
         */
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      BIT(trans->cfg->csr->flag_init_done));
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret;

        /* nic_init */
        spin_lock(&trans_pcie->irq_lock);
        ret = iwl_pcie_apm_init(trans);
        spin_unlock(&trans_pcie->irq_lock);

        if (ret)
                return ret;

        iwl_pcie_set_pwr(trans, false);

        iwl_op_mode_nic_config(trans->op_mode);

        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_pcie_rx_init(trans);

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_pcie_tx_init(trans))
                return -ENOMEM;

        if (trans->cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
                IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
        }

        return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 on success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
        int ret;

        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        /* See if we got it */
        ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           HW_READY_TIMEOUT);

        if (ret >= 0)
                iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

        IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
        return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
        int ret;
        int t = 0;
        int iter;

        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

        ret = iwl_pcie_set_hw_ready(trans);
        /* If the card is ready, exit 0 */
        if (ret >= 0)
                return 0;

        iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                    CSR_RESET_LINK_PWR_MGMT_DISABLED);
        usleep_range(1000, 2000);

        for (iter = 0; iter < 10; iter++) {
                /* If HW is not ready, prepare the conditions to check again */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_PREPARE);

                do {
                        ret = iwl_pcie_set_hw_ready(trans);
                        if (ret >= 0)
                                return 0;

                        usleep_range(200, 1000);
                        t += 200;
                } while (t < 150000);
                msleep(25);
        }

        IWL_ERR(trans, "Couldn't prepare the card\n");

        return ret;
}
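
/*
 * Editor's note on the retry budget above (illustrative arithmetic):
 * t is never reset, so the inner do/while loop performs at most
 * 150000 / 200 = 750 polls in total across all outer iterations,
 * each followed by a 200-1000 us sleep (roughly 0.15-0.75 s of
 * polling overall); once t is exhausted, every remaining outer
 * iteration degenerates to a single poll plus the 25 ms msleep(),
 * bounding the whole wait to on the order of a second.
 */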

/*
 * ucode
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
                                            u32 dst_addr, dma_addr_t phy_addr,
                                            u32 byte_cnt)
{
        iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
                    dst_addr);

        iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
                    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
                    (iwl_get_dma_hi_addr(phy_addr)
                        << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
                    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
                    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
                    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
                    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}
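
/*
 * Editor's summary of the service-channel programming above
 * (descriptive only, not new code):
 *
 *   1. pause the TX DMA channel,
 *   2. point it at the SRAM destination and the DRAM source
 *      (low 32 bits, plus high bits via iwl_get_dma_hi_addr()),
 *   3. declare a single valid TB of byte_cnt bytes,
 *   4. re-enable the channel and request an interrupt at end-of-TFD,
 *      which the ISR turns into ucode_write_complete, awaited by
 *      iwl_pcie_load_firmware_chunk() below.
 */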

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
                                        u32 dst_addr, dma_addr_t phy_addr,
                                        u32 byte_cnt)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;
        int ret;

        trans_pcie->ucode_write_complete = false;

        if (!iwl_trans_grab_nic_access(trans, &flags))
                return -EIO;

        iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
                                        byte_cnt);
        iwl_trans_release_nic_access(trans, &flags);

        ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
                                 trans_pcie->ucode_write_complete, 5 * HZ);
        if (!ret) {
                IWL_ERR(trans, "Failed to load firmware chunk!\n");
                iwl_trans_pcie_dump_regs(trans);
                return -ETIMEDOUT;
        }

        return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
                                 const struct fw_desc *section)
{
        u8 *v_addr;
        dma_addr_t p_addr;
        u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
        int ret = 0;

        IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
                     section_num);

        v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
                                    GFP_KERNEL | __GFP_NOWARN);
        if (!v_addr) {
                IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
                chunk_sz = PAGE_SIZE;
                v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
                                            &p_addr, GFP_KERNEL);
                if (!v_addr)
                        return -ENOMEM;
        }

        for (offset = 0; offset < section->len; offset += chunk_sz) {
                u32 copy_size, dst_addr;
                bool extended_addr = false;

                copy_size = min_t(u32, chunk_sz, section->len - offset);
                dst_addr = section->offset + offset;

                if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
                    dst_addr <= IWL_FW_MEM_EXTENDED_END)
                        extended_addr = true;

                if (extended_addr)
                        iwl_set_bits_prph(trans, LMPM_CHICK,
                                          LMPM_CHICK_EXTENDED_ADDR_SPACE);

                memcpy(v_addr, (u8 *)section->data + offset, copy_size);
                ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
                                                   copy_size);

                if (extended_addr)
                        iwl_clear_bits_prph(trans, LMPM_CHICK,
                                            LMPM_CHICK_EXTENDED_ADDR_SPACE);

                if (ret) {
                        IWL_ERR(trans,
                                "Could not load the [%d] uCode section\n",
                                section_num);
                        break;
                }
        }

        dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
        return ret;
}
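
/*
 * Editor's example of the chunking above (illustrative numbers,
 * assuming FH_MEM_TB_MAX_LENGTH is 128 KiB): a 144 KiB section is
 * copied as one 128 KiB chunk plus one 16 KiB chunk through the big
 * coherent buffer; if that allocation fails, the same section is
 * instead streamed in PAGE_SIZE chunks through the small fallback
 * buffer, with the LMPM_CHICK extended-address bit toggled around
 * any chunk that targets the extended SRAM range.
 */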

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
                                           const struct fw_img *image,
                                           int cpu,
                                           int *first_ucode_section)
{
        int shift_param;
        int i, ret = 0, sec_num = 0x1;
        u32 val, last_read_idx = 0;

        if (cpu == 1) {
                shift_param = 0;
                *first_ucode_section = 0;
        } else {
                shift_param = 16;
                (*first_ucode_section)++;
        }

        for (i = *first_ucode_section; i < image->num_sec; i++) {
                last_read_idx = i;

                /*
                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
                 * CPU1 to CPU2.
                 * PAGING_SEPARATOR_SECTION delimiter - separate between
                 * CPU2 non paged to CPU2 paging sec.
                 */
                if (!image->sec[i].data ||
                    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
                    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
                        break;
                }

                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;

                /* Notify ucode of loaded section number and status */
                val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
                val = val | (sec_num << shift_param);
                iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

                sec_num = (sec_num << 1) | 0x1;
        }

        *first_ucode_section = last_read_idx;

        iwl_enable_interrupts(trans);

        if (trans->cfg->use_tfh) {
                if (cpu == 1)
                        iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
                                       0xFFFF);
                else
                        iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
                                       0xFFFFFFFF);
        } else {
                if (cpu == 1)
                        iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
                                           0xFFFF);
                else
                        iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
                                           0xFFFFFFFF);
        }

        return 0;
}
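
/*
 * Editor's note on sec_num above (illustrative): it accumulates a
 * one-bit-per-section mask reported to the ucode, growing
 * 0x1 -> 0x3 -> 0x7 -> ... after each loaded section. For CPU2 the
 * mask is shifted left by 16 before being OR'd into the load-status
 * register, and the final writes of 0xFFFF / 0xFFFFFFFF mark CPU1
 * alone / both CPUs as fully loaded.
 */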

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
                                      const struct fw_img *image,
                                      int cpu,
                                      int *first_ucode_section)
{
        int i, ret = 0;
        u32 last_read_idx = 0;

        if (cpu == 1)
                *first_ucode_section = 0;
        else
                (*first_ucode_section)++;

        for (i = *first_ucode_section; i < image->num_sec; i++) {
                last_read_idx = i;

                /*
                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
                 * CPU1 to CPU2.
                 * PAGING_SEPARATOR_SECTION delimiter - separate between
                 * CPU2 non paged to CPU2 paging sec.
                 */
                if (!image->sec[i].data ||
                    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
                    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
                        break;
                }

                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;
        }

        *first_ucode_section = last_read_idx;

        return 0;
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
        const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
        int i;

        if (trans->ini_valid) {
                if (!trans->num_blocks)
                        return;

                IWL_DEBUG_FW(trans,
                             "WRT: applying DRAM buffer[0] destination\n");
                iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
                                    trans->fw_mon[0].physical >>
                                    MON_BUFF_SHIFT_VER2);
                iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
                                    (trans->fw_mon[0].physical +
                                     trans->fw_mon[0].size - 256) >>
                                    MON_BUFF_SHIFT_VER2);
                return;
        }

        IWL_INFO(trans, "Applying debug destination %s\n",
                 get_fw_dbg_mode_string(dest->monitor_mode));

        if (dest->monitor_mode == EXTERNAL_MODE)
                iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
        else
                IWL_WARN(trans, "PCI should have external buffer debug\n");

        for (i = 0; i < trans->dbg_n_dest_reg; i++) {
                u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
                u32 val = le32_to_cpu(dest->reg_ops[i].val);

                switch (dest->reg_ops[i].op) {
                case CSR_ASSIGN:
                        iwl_write32(trans, addr, val);
                        break;
                case CSR_SETBIT:
                        iwl_set_bit(trans, addr, BIT(val));
                        break;
                case CSR_CLEARBIT:
                        iwl_clear_bit(trans, addr, BIT(val));
                        break;
                case PRPH_ASSIGN:
                        iwl_write_prph(trans, addr, val);
                        break;
                case PRPH_SETBIT:
                        iwl_set_bits_prph(trans, addr, BIT(val));
                        break;
                case PRPH_CLEARBIT:
                        iwl_clear_bits_prph(trans, addr, BIT(val));
                        break;
                case PRPH_BLOCKBIT:
                        if (iwl_read_prph(trans, addr) & BIT(val)) {
                                IWL_ERR(trans,
                                        "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
                                        val, addr);
                                goto monitor;
                        }
                        break;
                default:
                        IWL_ERR(trans, "FW debug - unknown OP %d\n",
                                dest->reg_ops[i].op);
                        break;
                }
        }

monitor:
        if (dest->monitor_mode == EXTERNAL_MODE && trans->fw_mon[0].size) {
                iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
                               trans->fw_mon[0].physical >> dest->base_shift);
                if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
                        iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
                                       (trans->fw_mon[0].physical +
                                        trans->fw_mon[0].size - 256) >>
                                                dest->end_shift);
                else
                        iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
                                       (trans->fw_mon[0].physical +
                                        trans->fw_mon[0].size) >>
                                                dest->end_shift);
        }
}
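
/*
 * Editor's note (illustrative): in EXTERNAL_MODE the monitor window
 * is programmed as a DMA address shifted right, e.g. with
 * base_shift = 4 a buffer at physical 0x12340000 is written as
 * 0x01234000; on 8000+ family parts the end pointer is pulled in by
 * 256 bytes, mirroring the MON_BUFF_END_ADDR_VER2 programming in the
 * ini path at the top of this function.
 */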

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                     const struct fw_img *image)
{
        int ret = 0;
        int first_ucode_section;

        IWL_DEBUG_FW(trans, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");

        /* load to FW the binary non secured sections of CPU1 */
        ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
        if (ret)
                return ret;

        if (image->is_dual_cpus) {
                /* set CPU2 header address */
                iwl_write_prph(trans,
                               LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
                               LMPM_SECURE_CPU2_HDR_MEM_SPACE);

                /* load to FW the binary sections of CPU2 */
                ret = iwl_pcie_load_cpu_sections(trans, image, 2,
                                                 &first_ucode_section);
                if (ret)
                        return ret;
        }

        /* supported for 7000 only for the moment */
        if (iwlwifi_mod_params.fw_monitor &&
            trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
                iwl_pcie_alloc_fw_monitor(trans, 0);

                if (trans->fw_mon[0].size) {
                        iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
                                       trans->fw_mon[0].physical >> 4);
                        iwl_write_prph(trans, MON_BUFF_END_ADDR,
                                       (trans->fw_mon[0].physical +
                                        trans->fw_mon[0].size) >> 4);
                }
        } else if (iwl_pcie_dbg_on(trans)) {
                iwl_pcie_apply_destination(trans);
        }

        iwl_enable_interrupts(trans);

        /* release CPU reset */
        iwl_write32(trans, CSR_RESET, 0);

        return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
                                          const struct fw_img *image)
{
        int ret = 0;
        int first_ucode_section;

        IWL_DEBUG_FW(trans, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");

        if (iwl_pcie_dbg_on(trans))
                iwl_pcie_apply_destination(trans);

        IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
                        iwl_read_prph(trans, WFPM_GP2));

        /*
         * Set a default value. On resume, reading back the values that
         * were zeroed can provide debug data on the resume flow.
         * This is for debugging only and has no functional impact.
         */
        iwl_write_prph(trans, WFPM_GP2, 0x01010101);

        /* configure the ucode to be ready to get the secured image */
        /* release CPU reset */
        iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

        /* load to FW the binary Secured sections of CPU1 */
        ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
                                              &first_ucode_section);
        if (ret)
                return ret;

        /* load to FW the binary sections of CPU2 */
        return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
                                               &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill = iwl_is_rfkill_set(trans);
        bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
        bool report;

        if (hw_rfkill) {
                set_bit(STATUS_RFKILL_HW, &trans->status);
                set_bit(STATUS_RFKILL_OPMODE, &trans->status);
        } else {
                clear_bit(STATUS_RFKILL_HW, &trans->status);
                if (trans_pcie->opmode_down)
                        clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
        }

        report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

        if (prev != report)
                iwl_trans_pcie_rf_kill(trans, report);

        return hw_rfkill;
}

struct iwl_causes_list {
        u32 cause_num;
        u32 mask_reg;
        u8 addr;
};

static struct iwl_causes_list causes_list[] = {
        {MSIX_FH_INT_CAUSES_D2S_CH0_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0},
        {MSIX_FH_INT_CAUSES_D2S_CH1_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0x1},
        {MSIX_FH_INT_CAUSES_S2D,                CSR_MSIX_FH_INT_MASK_AD, 0x3},
        {MSIX_FH_INT_CAUSES_FH_ERR,             CSR_MSIX_FH_INT_MASK_AD, 0x5},
        {MSIX_HW_INT_CAUSES_REG_ALIVE,          CSR_MSIX_HW_INT_MASK_AD, 0x10},
        {MSIX_HW_INT_CAUSES_REG_WAKEUP,         CSR_MSIX_HW_INT_MASK_AD, 0x11},
        {MSIX_HW_INT_CAUSES_REG_IML,            CSR_MSIX_HW_INT_MASK_AD, 0x12},
        {MSIX_HW_INT_CAUSES_REG_CT_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x16},
        {MSIX_HW_INT_CAUSES_REG_RF_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x17},
        {MSIX_HW_INT_CAUSES_REG_PERIODIC,       CSR_MSIX_HW_INT_MASK_AD, 0x18},
        {MSIX_HW_INT_CAUSES_REG_SW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x29},
        {MSIX_HW_INT_CAUSES_REG_SCD,            CSR_MSIX_HW_INT_MASK_AD, 0x2A},
        {MSIX_HW_INT_CAUSES_REG_FH_TX,          CSR_MSIX_HW_INT_MASK_AD, 0x2B},
        {MSIX_HW_INT_CAUSES_REG_HW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x2D},
        {MSIX_HW_INT_CAUSES_REG_HAP,            CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};

static struct iwl_causes_list causes_list_v2[] = {
        {MSIX_FH_INT_CAUSES_D2S_CH0_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0},
        {MSIX_FH_INT_CAUSES_D2S_CH1_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0x1},
        {MSIX_FH_INT_CAUSES_S2D,                CSR_MSIX_FH_INT_MASK_AD, 0x3},
        {MSIX_FH_INT_CAUSES_FH_ERR,             CSR_MSIX_FH_INT_MASK_AD, 0x5},
        {MSIX_HW_INT_CAUSES_REG_ALIVE,          CSR_MSIX_HW_INT_MASK_AD, 0x10},
        {MSIX_HW_INT_CAUSES_REG_IPC,            CSR_MSIX_HW_INT_MASK_AD, 0x11},
        {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2,      CSR_MSIX_HW_INT_MASK_AD, 0x15},
        {MSIX_HW_INT_CAUSES_REG_CT_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x16},
        {MSIX_HW_INT_CAUSES_REG_RF_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x17},
        {MSIX_HW_INT_CAUSES_REG_PERIODIC,       CSR_MSIX_HW_INT_MASK_AD, 0x18},
        {MSIX_HW_INT_CAUSES_REG_SCD,            CSR_MSIX_HW_INT_MASK_AD, 0x2A},
        {MSIX_HW_INT_CAUSES_REG_FH_TX,          CSR_MSIX_HW_INT_MASK_AD, 0x2B},
        {MSIX_HW_INT_CAUSES_REG_HW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x2D},
        {MSIX_HW_INT_CAUSES_REG_HAP,            CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
        int i, arr_size =
                (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
                ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);

        /*
         * Access all non RX causes and map them to the default irq.
         * In case we are missing at least one interrupt vector,
         * the first interrupt vector will serve non-RX and FBQ causes.
         */
        for (i = 0; i < arr_size; i++) {
                struct iwl_causes_list *causes =
                        (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
                        causes_list : causes_list_v2;

                iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
                iwl_clear_bit(trans, causes[i].mask_reg,
                              causes[i].cause_num);
        }
}
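
/*
 * Editor's sketch of one IVAR entry written above (illustrative):
 * for the SCD cause at table offset 0x2A with def_irq = 0, val is
 * 0 | MSIX_NON_AUTO_CLEAR_CAUSE, so CSR_MSIX_IVAR(0x2A) is loaded
 * with vector 0 plus the "no auto clear" flag, and the matching
 * cause bit is then unmasked in CSR_MSIX_HW_INT_MASK_AD.
 */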

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 offset =
                trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
        u32 val, idx;

        /*
         * The first RX queue - the fallback queue, which is designated for
         * management frames, command responses, etc. - is always mapped to
         * the first interrupt vector. The other RX queues are mapped to
         * the other (N - 2) interrupt vectors.
         */
        val = BIT(MSIX_FH_INT_CAUSES_Q(0));
        for (idx = 1; idx < trans->num_rx_queues; idx++) {
                iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
                           MSIX_FH_INT_CAUSES_Q(idx - offset));
                val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
        }
        iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

        val = MSIX_FH_INT_CAUSES_Q(0);
        if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
                val |= MSIX_NON_AUTO_CLEAR_CAUSE;
        iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

        if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
                iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
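
/*
 * Editor's example (illustrative): with IWL_SHARED_IRQ_FIRST_RSS set,
 * offset = 1, so RX queues 2..N-1 are bound to causes Q(1)..Q(N-2) by
 * the loop, while both queue 0 (the fallback queue) and queue 1 are
 * pointed at cause Q(0) by the two trailing writes - i.e. they share
 * the first interrupt vector.
 */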

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
        struct iwl_trans *trans = trans_pcie->trans;

        if (!trans_pcie->msix_enabled) {
                if (trans->cfg->mq_rx_supported &&
                    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                        iwl_write_umac_prph(trans, UREG_CHICK,
                                            UREG_CHICK_MSI_ENABLE);
                return;
        }
        /*
         * The IVAR table needs to be configured again after reset,
         * but if the device is disabled, we can't write to
         * prph.
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

        /*
         * Each cause from the causes list above and the RX causes is
         * represented as a byte in the IVAR table. The first nibble
         * represents the bound interrupt vector of the cause, the second
         * represents no auto clear for this cause. This will be set if its
         * interrupt vector is bound to serve other causes.
         */
        iwl_pcie_map_rx_causes(trans);

        iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
        struct iwl_trans *trans = trans_pcie->trans;

        iwl_pcie_conf_msix_hw(trans_pcie);

        if (!trans_pcie->msix_enabled)
                return;

        trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
        trans_pcie->fh_mask = trans_pcie->fh_init_mask;
        trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
        trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        lockdep_assert_held(&trans_pcie->mutex);

        if (trans_pcie->is_down)
                return;

        trans_pcie->is_down = true;

        /* Stop dbgc before stopping device */
        _iwl_fw_dbg_stop_recording(trans, NULL);

        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);

        /* device going down, stop using ICT table */
        iwl_pcie_disable_ict(trans);

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
                IWL_DEBUG_INFO(trans,
                               "DEVICE_ENABLED bit was set and is now cleared\n");
                iwl_pcie_tx_stop(trans);
                iwl_pcie_rx_stop(trans);

                /* Power-down device's busmaster DMA clocks */
                if (!trans->cfg->apmg_not_supported) {
                        iwl_write_prph(trans, APMG_CLK_DIS_REG,
                                       APMG_CLK_VAL_DMA_CLK_RQT);
                        udelay(5);
                }
        }

        /* Make sure (redundant) we've released our request to stay awake */
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      BIT(trans->cfg->csr->flag_mac_access_req));

        /* Stop the device, and put it in low power state */
        iwl_pcie_apm_stop(trans, false);

        iwl_trans_pcie_sw_reset(trans);

        /*
         * Upon stop, the IVAR table gets erased, so msi-x won't
         * work. This causes a bug in RF-KILL flows, since the interrupt
         * that enables radio won't fire on the correct irq, and the
         * driver won't be able to handle the interrupt.
         * Configure the IVAR table again after reset.
         */
        iwl_pcie_conf_msix_hw(trans_pcie);

        /*
         * Upon stop, the APM issues an interrupt if HW RF kill is set.
         * This is a bug in certain versions of the hardware.
         * Certain devices also keep sending HW RF kill interrupt all
         * the time, unless the interrupt is ACKed even if the interrupt
         * should be masked. Re-ACK all the interrupts here.
         */
        iwl_disable_interrupts(trans);

        /* clear all status bits */
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        clear_bit(STATUS_INT_ENABLED, &trans->status);
        clear_bit(STATUS_TPOWER_PMI, &trans->status);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwl_enable_rfkill_int(trans);

        /* re-take ownership to prevent other users from stealing the device */
        iwl_pcie_prepare_card_hw(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->msix_enabled) {
                int i;

                for (i = 0; i < trans_pcie->alloc_vecs; i++)
                        synchronize_irq(trans_pcie->msix_entries[i].vector);
        } else {
                synchronize_irq(trans_pcie->pci_dev->irq);
        }
}
1323
1324static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1325                                   const struct fw_img *fw, bool run_in_rfkill)
1326{
1327        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1328        bool hw_rfkill;
1329        int ret;
1330
1331        /* This may fail if AMT took ownership of the device */
1332        if (iwl_pcie_prepare_card_hw(trans)) {
1333                IWL_WARN(trans, "Exit HW not ready\n");
1334                ret = -EIO;
1335                goto out;
1336        }
1337
1338        iwl_enable_rfkill_int(trans);
1339
1340        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1341
1342        /*
1343         * We enabled the RF-Kill interrupt and the handler may very
1344         * well be running. Disable the interrupts to make sure no other
1345         * interrupt can be fired.
1346         */
1347        iwl_disable_interrupts(trans);
1348
1349        /* Make sure it finished running */
1350        iwl_pcie_synchronize_irqs(trans);
1351
1352        mutex_lock(&trans_pcie->mutex);
1353
1354        /* If platform's RF_KILL switch is NOT set to KILL */
1355        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
1356        if (hw_rfkill && !run_in_rfkill) {
1357                ret = -ERFKILL;
1358                goto out;
1359        }
1360
1361        /* Someone called stop_device, don't try to start_fw */
1362        if (trans_pcie->is_down) {
1363                IWL_WARN(trans,
1364                         "Can't start_fw since the HW hasn't been started\n");
1365                ret = -EIO;
1366                goto out;
1367        }
1368
1369        /* make sure rfkill handshake bits are cleared */
1370        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1371        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1372                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1373
1374        /* clear (again), then enable host interrupts */
1375        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1376
1377        ret = iwl_pcie_nic_init(trans);
1378        if (ret) {
1379                IWL_ERR(trans, "Unable to init nic\n");
1380                goto out;
1381        }
1382
1383        /*
1384         * Now, we load the firmware and don't want to be interrupted, even
1385         * by the RF-Kill interrupt (hence mask all the interrupts besides the
1386         * FH_TX interrupt which is needed to load the firmware). If the
1387         * RF-Kill switch is toggled, we will find out after having loaded
1388         * the firmware and return the proper value to the caller.
1389         */
1390        iwl_enable_fw_load_int(trans);
1391
1392        /* really make sure rfkill handshake bits are cleared */
1393        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1394        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1395
1396        /* Load the given image to the HW */
1397        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
1398                ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1399        else
1400                ret = iwl_pcie_load_given_ucode(trans, fw);
1401
1402        /* re-check RF-Kill state since we may have missed the interrupt */
1403        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
1404        if (hw_rfkill && !run_in_rfkill)
1405                ret = -ERFKILL;
1406
1407out:
1408        mutex_unlock(&trans_pcie->mutex);
1409        return ret;
1410}
1411
1412static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1413{
1414        iwl_pcie_reset_ict(trans);
1415        iwl_pcie_tx_start(trans, scd_addr);
1416}
1417
1418void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
1419                                       bool was_in_rfkill)
1420{
1421        bool hw_rfkill;
1422
1423        /*
1424         * Check again since the RF kill state may have changed while
1425         * all the interrupts were disabled, in this case we couldn't
1426         * receive the RF kill interrupt and update the state in the
1427         * op_mode.
1428         * Don't call the op_mode if the rfkill state hasn't changed.
1429         * This allows the op_mode to call stop_device from the rfkill
1430         * notification without endless recursion. Under very rare
1431         * circumstances, we might have a small recursion if the rfkill
1432         * state changed exactly now while we were called from stop_device.
1433         * This is very unlikely but can happen and is supported.
1434         */
1435        hw_rfkill = iwl_is_rfkill_set(trans);
1436        if (hw_rfkill) {
1437                set_bit(STATUS_RFKILL_HW, &trans->status);
1438                set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1439        } else {
1440                clear_bit(STATUS_RFKILL_HW, &trans->status);
1441                clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1442        }
1443        if (hw_rfkill != was_in_rfkill)
1444                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1445}
1446
1447static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1448{
1449        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1450        bool was_in_rfkill;
1451
1452        mutex_lock(&trans_pcie->mutex);
1453        trans_pcie->opmode_down = true;
1454        was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1455        _iwl_trans_pcie_stop_device(trans, low_power);
1456        iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
1457        mutex_unlock(&trans_pcie->mutex);
1458}
1459
1460void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
1461{
1462        struct iwl_trans_pcie __maybe_unused *trans_pcie =
1463                IWL_TRANS_GET_PCIE_TRANS(trans);
1464
1465        lockdep_assert_held(&trans_pcie->mutex);
1466
1467        IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
1468                 state ? "disabled" : "enabled");
1469        if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
1470                if (trans->cfg->gen2)
1471                        _iwl_trans_pcie_gen2_stop_device(trans, true);
1472                else
1473                        _iwl_trans_pcie_stop_device(trans, true);
1474        }
1475}
1476
1477static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
1478                                      bool reset)
1479{
1480        if (!reset) {
1481                /* Enable persistence mode to avoid reset */
1482                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
1483                            CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
1484        }
1485
1486        iwl_disable_interrupts(trans);
1487
1488        /*
1489         * in testing mode, the host stays awake and the
1490         * hardware won't be reset (not even partially)
1491         */
1492        if (test)
1493                return;
1494
1495        iwl_pcie_disable_ict(trans);
1496
1497        iwl_pcie_synchronize_irqs(trans);
1498
1499        iwl_clear_bit(trans, CSR_GP_CNTRL,
1500                      BIT(trans->cfg->csr->flag_mac_access_req));
1501        iwl_clear_bit(trans, CSR_GP_CNTRL,
1502                      BIT(trans->cfg->csr->flag_init_done));
1503
1504        if (reset) {
1505                /*
1506                 * reset TX queues -- some of their registers reset during S3
1507                 * so if we don't reset everything here the D3 image would try
1508                 * to execute some invalid memory upon resume
1509                 * to execute from invalid memory upon resume
1510                iwl_trans_pcie_tx_reset(trans);
1511        }
1512
1513        iwl_pcie_set_pwr(trans, true);
1514}
1515
1516static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1517                                    enum iwl_d3_status *status,
1518                                    bool test, bool reset)
1519{
1520        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1521        u32 val;
1522        int ret;
1523
1524        if (test) {
1525                iwl_enable_interrupts(trans);
1526                *status = IWL_D3_STATUS_ALIVE;
1527                return 0;
1528        }
1529
1530        iwl_set_bit(trans, CSR_GP_CNTRL,
1531                    BIT(trans->cfg->csr->flag_mac_access_req));
1532
1533        ret = iwl_finish_nic_init(trans);
1534        if (ret)
1535                return ret;
1536
1537        /*
1538         * Reconfigure the IVAR table in case of MSI-X, or reset the ICT
1539         * table in MSI mode, since the HW reset erased them.
1540         * This also enables interrupts - none will fire yet, as the
1541         * device doesn't know we're waking it up; they start only once
1542         * the opmode actually tells it after this call.
1543         */
1544        iwl_pcie_conf_msix_hw(trans_pcie);
1545        if (!trans_pcie->msix_enabled)
1546                iwl_pcie_reset_ict(trans);
1547        iwl_enable_interrupts(trans);
1548
1549        iwl_pcie_set_pwr(trans, false);
1550
1551        if (!reset) {
1552                iwl_clear_bit(trans, CSR_GP_CNTRL,
1553                              BIT(trans->cfg->csr->flag_mac_access_req));
1554        } else {
1555                iwl_trans_pcie_tx_reset(trans);
1556
1557                ret = iwl_pcie_rx_init(trans);
1558                if (ret) {
1559                        IWL_ERR(trans,
1560                                "Failed to resume the device (RX reset)\n");
1561                        return ret;
1562                }
1563        }
1564
1565        IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
1566                        iwl_read_umac_prph(trans, WFPM_GP2));
1567
1568        val = iwl_read32(trans, CSR_RESET);
1569        if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
1570                *status = IWL_D3_STATUS_RESET;
1571        else
1572                *status = IWL_D3_STATUS_ALIVE;
1573
1574        return 0;
1575}
1576
1577static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1578                                        struct iwl_trans *trans)
1579{
1580        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1581        int max_irqs, num_irqs, i, ret;
1582        u16 pci_cmd;
1583
1584        if (!trans->cfg->mq_rx_supported)
1585                goto enable_msi;
1586
1587        max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
1588        for (i = 0; i < max_irqs; i++)
1589                trans_pcie->msix_entries[i].entry = i;
1590
1591        num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
1592                                         MSIX_MIN_INTERRUPT_VECTORS,
1593                                         max_irqs);
1594        if (num_irqs < 0) {
1595                IWL_DEBUG_INFO(trans,
1596                               "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
1597                               num_irqs);
1598                goto enable_msi;
1599        }
1600        trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
1601
1602        IWL_DEBUG_INFO(trans,
1603                       "MSI-X enabled. %d interrupt vectors were allocated\n",
1604                       num_irqs);
1605
1606        /*
1607         * In case the OS provides fewer interrupts than requested, different
1608         * causes will share the same interrupt vector as follows:
1609         * One interrupt fewer: non-RX causes are shared with the FBQ.
1610         * Two fewer: non-RX causes are shared with the FBQ and RSS.
1611         * More than two fewer: we use fewer RSS queues.
1612         */
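            /*
             * Worked example (illustrative): with 6 requested vectors and
             * only 4 granted, num_irqs (4) <= max_irqs - 2 (4), so def_irq
             * stays 0: vector 0 carries the non-RX causes plus the FBQ and
             * the first RSS queue, and each remaining vector serves one
             * RSS queue.
             */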
1613        if (num_irqs <= max_irqs - 2) {
1614                trans_pcie->trans->num_rx_queues = num_irqs + 1;
1615                trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1616                        IWL_SHARED_IRQ_FIRST_RSS;
1617        } else if (num_irqs == max_irqs - 1) {
1618                trans_pcie->trans->num_rx_queues = num_irqs;
1619                trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1620        } else {
1621                trans_pcie->trans->num_rx_queues = num_irqs - 1;
1622        }
1623        WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
1624
1625        trans_pcie->alloc_vecs = num_irqs;
1626        trans_pcie->msix_enabled = true;
1627        return;
1628
1629enable_msi:
1630        ret = pci_enable_msi(pdev);
1631        if (ret) {
1632                dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
1633                /* enable rfkill interrupt: hw bug workaround */
1634                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1635                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1636                        pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1637                        pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1638                }
1639        }
1640}
1641
1642static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
1643{
1644        int iter_rx_q, i, ret, cpu, offset;
1645        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1646
1647        i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
1648        iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
1649        offset = 1 + i;
1650        for (; i < iter_rx_q ; i++) {
1651                /*
1652                 * Pass the index just before the place to search, so the
1653                 * CPU returned will be > i - offset.
1654                 */
1655                cpu = cpumask_next(i - offset, cpu_online_mask);
1656                cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
1657                ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
1658                                            &trans_pcie->affinity_mask[i]);
1659                if (ret)
1660                        IWL_ERR(trans_pcie->trans,
1661                                "Failed to set affinity mask for IRQ %d\n",
1662                                i);
1663        }
1664}
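
    /*
     * Worked example (illustrative): when IWL_SHARED_IRQ_FIRST_RSS is set,
     * i starts at 0 and offset is 1; otherwise i starts at 1 (skipping the
     * non-RX vector 0) and offset is 2. Either way i - offset starts at -1,
     * so cpumask_next() pins the first RX vector to the first online CPU
     * and walks up from there, one CPU per vector.
     */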
1665
1666static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1667                                      struct iwl_trans_pcie *trans_pcie)
1668{
1669        int i;
1670
1671        for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1672                int ret;
1673                struct msix_entry *msix_entry;
1674                const char *qname = queue_name(&pdev->dev, trans_pcie, i);
1675
1676                if (!qname)
1677                        return -ENOMEM;
1678
1679                msix_entry = &trans_pcie->msix_entries[i];
1680                ret = devm_request_threaded_irq(&pdev->dev,
1681                                                msix_entry->vector,
1682                                                iwl_pcie_msix_isr,
1683                                                (i == trans_pcie->def_irq) ?
1684                                                iwl_pcie_irq_msix_handler :
1685                                                iwl_pcie_irq_rx_msix_handler,
1686                                                IRQF_SHARED,
1687                                                qname,
1688                                                msix_entry);
1689                if (ret) {
1690                        IWL_ERR(trans_pcie->trans,
1691                                "Error allocating IRQ %d\n", i);
1692
1693                        return ret;
1694                }
1695        }
1696        iwl_pcie_irq_set_affinity(trans_pcie->trans);
1697
1698        return 0;
1699}
1700
1701static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
1702{
1703        u32 hpm, wprot;
1704
1705        switch (trans->cfg->device_family) {
1706        case IWL_DEVICE_FAMILY_9000:
1707                wprot = PREG_PRPH_WPROT_9000;
1708                break;
1709        case IWL_DEVICE_FAMILY_22000:
1710                wprot = PREG_PRPH_WPROT_22000;
1711                break;
1712        default:
1713                return 0;
1714        }
1715
1716        hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
1717        if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
1718                u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
1719
1720                if (wprot_val & PREG_WFPM_ACCESS) {
1721                        IWL_ERR(trans,
1722                                "Error, cannot clear persistence bit\n");
1723                        return -EPERM;
1724                }
1725                iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
1726                                            hpm & ~PERSISTENCE_BIT);
1727        }
1728
1729        return 0;
1730}
1731
1732static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
1733{
1734        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1735        int err;
1736
1737        lockdep_assert_held(&trans_pcie->mutex);
1738
1739        err = iwl_pcie_prepare_card_hw(trans);
1740        if (err) {
1741                IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1742                return err;
1743        }
1744
1745        err = iwl_trans_pcie_clear_persistence_bit(trans);
1746        if (err)
1747                return err;
1748
1749        iwl_trans_pcie_sw_reset(trans);
1750
1751        err = iwl_pcie_apm_init(trans);
1752        if (err)
1753                return err;
1754
1755        iwl_pcie_init_msix(trans_pcie);
1756
1757        /* From now on, the op_mode will be kept updated about RF kill state */
1758        iwl_enable_rfkill_int(trans);
1759
1760        trans_pcie->opmode_down = false;
1761
1762        /* Set is_down to false here so that...*/
1763        trans_pcie->is_down = false;
1764
1765        /* ...rfkill can call stop_device and set it back to true if needed */
1766        iwl_pcie_check_hw_rf_kill(trans);
1767
1768        /* Make sure we sync here, because we'll need full access later */
1769        if (low_power)
1770                pm_runtime_resume(trans->dev);
1771
1772        return 0;
1773}
1774
1775static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
1776{
1777        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1778        int ret;
1779
1780        mutex_lock(&trans_pcie->mutex);
1781        ret = _iwl_trans_pcie_start_hw(trans, low_power);
1782        mutex_unlock(&trans_pcie->mutex);
1783
1784        return ret;
1785}
1786
1787static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1788{
1789        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1790
1791        mutex_lock(&trans_pcie->mutex);
1792
1793        /* disable interrupts - don't enable HW RF kill interrupt */
1794        iwl_disable_interrupts(trans);
1795
1796        iwl_pcie_apm_stop(trans, true);
1797
1798        iwl_disable_interrupts(trans);
1799
1800        iwl_pcie_disable_ict(trans);
1801
1802        mutex_unlock(&trans_pcie->mutex);
1803
1804        iwl_pcie_synchronize_irqs(trans);
1805}
1806
1807static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1808{
1809        writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1810}
1811
1812static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1813{
1814        writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1815}
1816
1817static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1818{
1819        return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1820}
1821
1822static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
1823{
1824        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1825                return 0x00FFFFFF;
1826        else
1827                return 0x000FFFFF;
1828}
1829
1830static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1831{
1832        u32 mask = iwl_trans_pcie_prph_msk(trans);
1833
1834        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1835                               ((reg & mask) | (3 << 24)));
1836        return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1837}
1838
1839static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1840                                      u32 val)
1841{
1842        u32 mask = iwl_trans_pcie_prph_msk(trans);
1843
1844        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1845                               ((addr & mask) | (3 << 24)));
1846        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1847}
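
    /*
     * Note (illustrative, based on the two helpers above): the periphery
     * address goes in the low bits of HBUS_TARG_PRPH_{R,W}ADDR (20 bits
     * before the 22560 family, 24 bits from it on), and the (3 << 24)
     * appears to select a full 4-byte (dword) access.
     */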
1848
1849static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1850                                     const struct iwl_trans_config *trans_cfg)
1851{
1852        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1853
1854        trans_pcie->cmd_queue = trans_cfg->cmd_queue;
1855        trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
1856        trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
1857        if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1858                trans_pcie->n_no_reclaim_cmds = 0;
1859        else
1860                trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1861        if (trans_pcie->n_no_reclaim_cmds)
1862                memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1863                       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1864
1865        trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
1866        trans_pcie->rx_page_order =
1867                iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
1868
1869        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
1870        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
1871        trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
1872
1873        trans_pcie->page_offs = trans_cfg->cb_data_offs;
1874        trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
1875
1876        trans->command_groups = trans_cfg->command_groups;
1877        trans->command_groups_size = trans_cfg->command_groups_size;
1878
1879        /* Initialize NAPI here - it should be before registering to mac80211
1880         * in the opmode but after the HW struct is allocated.
1881         * As this function may be called again in some corner cases, don't
1882         * do anything if NAPI was already initialized.
1883         */
1884        if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
1885                init_dummy_netdev(&trans_pcie->napi_dev);
1886}
1887
1888void iwl_trans_pcie_free(struct iwl_trans *trans)
1889{
1890        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1891        int i;
1892
1893        iwl_pcie_synchronize_irqs(trans);
1894
1895        if (trans->cfg->gen2)
1896                iwl_pcie_gen2_tx_free(trans);
1897        else
1898                iwl_pcie_tx_free(trans);
1899        iwl_pcie_rx_free(trans);
1900
1901        if (trans_pcie->rba.alloc_wq) {
1902                destroy_workqueue(trans_pcie->rba.alloc_wq);
1903                trans_pcie->rba.alloc_wq = NULL;
1904        }
1905
1906        if (trans_pcie->msix_enabled) {
1907                for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1908                        irq_set_affinity_hint(
1909                                trans_pcie->msix_entries[i].vector,
1910                                NULL);
1911                }
1912
1913                trans_pcie->msix_enabled = false;
1914        } else {
1915                iwl_pcie_free_ict(trans);
1916        }
1917
1918        iwl_pcie_free_fw_monitor(trans);
1919
1920        for_each_possible_cpu(i) {
1921                struct iwl_tso_hdr_page *p =
1922                        per_cpu_ptr(trans_pcie->tso_hdr_page, i);
1923
1924                if (p->page)
1925                        __free_page(p->page);
1926        }
1927
1928        free_percpu(trans_pcie->tso_hdr_page);
1929        mutex_destroy(&trans_pcie->mutex);
1930        iwl_trans_free(trans);
1931}
1932
1933static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1934{
1935        if (state)
1936                set_bit(STATUS_TPOWER_PMI, &trans->status);
1937        else
1938                clear_bit(STATUS_TPOWER_PMI, &trans->status);
1939}
1940
1941struct iwl_trans_pcie_removal {
1942        struct pci_dev *pdev;
1943        struct work_struct work;
1944};
1945
1946static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
1947{
1948        struct iwl_trans_pcie_removal *removal =
1949                container_of(wk, struct iwl_trans_pcie_removal, work);
1950        struct pci_dev *pdev = removal->pdev;
1951        static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
1952
1953        dev_err(&pdev->dev, "Device gone - attempting removal\n");
1954        kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
1955        pci_lock_rescan_remove();
1956        pci_dev_put(pdev);
1957        pci_stop_and_remove_bus_device(pdev);
1958        pci_unlock_rescan_remove();
1959
1960        kfree(removal);
1961        module_put(THIS_MODULE);
1962}
1963
1964static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
1965                                           unsigned long *flags)
1966{
1967        int ret;
1968        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1969
1970        spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
1971
1972        if (trans_pcie->cmd_hold_nic_awake)
1973                goto out;
1974
1975        /* this bit wakes up the NIC */
1976        __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1977                                 BIT(trans->cfg->csr->flag_mac_access_req));
1978        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
1979                udelay(2);
1980
1981        /*
1982         * These bits say the device is running, and should keep running for
1983         * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
1984         * but they do not indicate that embedded SRAM is restored yet;
1985         * HW with volatile SRAM must save/restore contents to/from
1986         * host DRAM when sleeping/waking for power-saving.
1987         * Each direction takes approximately 1/4 millisecond; with this
1988         * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
1989         * series of register accesses are expected (e.g. reading Event Log),
1990         * to keep device from sleeping.
1991         *
1992         * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
1993         * SRAM is okay/restored.  We don't check that here because this call
1994         * is just for hardware register access; but GP1 MAC_SLEEP
1995         * check is a good idea before accessing the SRAM of HW with
1996         * volatile SRAM (e.g. reading Event Log).
1997         *
1998         * 5000 series and later (including 1000 series) have non-volatile SRAM,
1999         * and do not save/restore SRAM when power cycling.
2000         */
2001        ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
2002                           BIT(trans->cfg->csr->flag_val_mac_access_en),
2003                           (BIT(trans->cfg->csr->flag_mac_clock_ready) |
2004                            CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
2005        if (unlikely(ret < 0)) {
2006                u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
2007
2008                WARN_ONCE(1,
2009                          "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
2010                          cntrl);
2011
2012                iwl_trans_pcie_dump_regs(trans);
2013
2014                if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
2015                        struct iwl_trans_pcie_removal *removal;
2016
2017                        if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2018                                goto err;
2019
2020                        IWL_ERR(trans, "Device gone - scheduling removal!\n");
2021
2022                        /*
2023                         * Take a module reference to avoid running this
2024                         * while the module is being unloaded, and to
2025                         * avoid scheduling a work item whose code is
2026                         * being removed.
2027                         */
2028                        if (!try_module_get(THIS_MODULE)) {
2029                                IWL_ERR(trans,
2030                                        "Module is being unloaded - abort\n");
2031                                goto err;
2032                        }
2033
2034                        removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
2035                        if (!removal) {
2036                                module_put(THIS_MODULE);
2037                                goto err;
2038                        }
2039                        /*
2040                         * we don't need to clear this flag, because
2041                         * the trans will be freed and reallocated.
2042                         */
2043                        set_bit(STATUS_TRANS_DEAD, &trans->status);
2044
2045                        removal->pdev = to_pci_dev(trans->dev);
2046                        INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
2047                        pci_dev_get(removal->pdev);
2048                        schedule_work(&removal->work);
2049                } else {
2050                        iwl_write32(trans, CSR_RESET,
2051                                    CSR_RESET_REG_FLAG_FORCE_NMI);
2052                }
2053
2054err:
2055                spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
2056                return false;
2057        }
2058
2059out:
2060        /*
2061         * Fool sparse by faking that we release the lock - sparse will
2062         * track nic_access anyway.
2063         */
2064        __release(&trans_pcie->reg_lock);
2065        return true;
2066}
2067
2068static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
2069                                              unsigned long *flags)
2070{
2071        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2072
2073        lockdep_assert_held(&trans_pcie->reg_lock);
2074
2075        /*
2076         * Fool sparse by faking that we acquire the lock - sparse will
2077         * track nic_access anyway.
2078         */
2079        __acquire(&trans_pcie->reg_lock);
2080
2081        if (trans_pcie->cmd_hold_nic_awake)
2082                goto out;
2083
2084        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2085                                   BIT(trans->cfg->csr->flag_mac_access_req));
2086        /*
2087         * Above we read the CSR_GP_CNTRL register, which will flush
2088         * any previous writes, but we need the write that clears the
2089         * MAC_ACCESS_REQ bit to be performed before any other writes
2090         * scheduled on different CPUs (after we drop reg_lock).
2091         */
2092out:
2093        spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
2094}
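
    /*
     * Usage sketch (illustrative): multi-register sequences bracket their
     * accesses with the grab/release pair, bailing out if the NIC could
     * not be woken:
     *
     *	unsigned long flags;
     *
     *	if (iwl_trans_grab_nic_access(trans, &flags)) {
     *		// iwl_read32()/iwl_write32() accesses go here
     *		iwl_trans_release_nic_access(trans, &flags);
     *	}
     *
     * iwl_trans_pcie_read_mem() below is a concrete instance.
     */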
2095
2096static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2097                                   void *buf, int dwords)
2098{
2099        unsigned long flags;
2100        int offs, ret = 0;
2101        u32 *vals = buf;
2102
2103        if (iwl_trans_grab_nic_access(trans, &flags)) {
2104                iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
2105                for (offs = 0; offs < dwords; offs++)
2106                        vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
2107                iwl_trans_release_nic_access(trans, &flags);
2108        } else {
2109                ret = -EBUSY;
2110        }
2111        return ret;
2112}
2113
2114static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
2115                                    const void *buf, int dwords)
2116{
2117        unsigned long flags;
2118        int offs, ret = 0;
2119        const u32 *vals = buf;
2120
2121        if (iwl_trans_grab_nic_access(trans, &flags)) {
2122                iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
2123                for (offs = 0; offs < dwords; offs++)
2124                        iwl_write32(trans, HBUS_TARG_MEM_WDAT,
2125                                    vals ? vals[offs] : 0);
2126                iwl_trans_release_nic_access(trans, &flags);
2127        } else {
2128                ret = -EBUSY;
2129        }
2130        return ret;
2131}
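
    /*
     * Caller sketch (illustrative; "addr" is assumed to be a valid device
     * memory address): both helpers return -EBUSY when the NIC cannot be
     * woken, so callers must check the return value:
     *
     *	u32 vals[4];
     *
     *	if (iwl_trans_pcie_read_mem(trans, addr, vals, ARRAY_SIZE(vals)))
     *		return;		// NIC asleep or gone
     */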
2132
2133static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
2134                                            unsigned long txqs,
2135                                            bool freeze)
2136{
2137        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2138        int queue;
2139
2140        for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
2141                struct iwl_txq *txq = trans_pcie->txq[queue];
2142                unsigned long now;
2143
2144                spin_lock_bh(&txq->lock);
2145
2146                now = jiffies;
2147
2148                if (txq->frozen == freeze)
2149                        goto next_queue;
2150
2151                IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
2152                                    freeze ? "Freezing" : "Waking", queue);
2153
2154                txq->frozen = freeze;
2155
2156                if (txq->read_ptr == txq->write_ptr)
2157                        goto next_queue;
2158
2159                if (freeze) {
2160                        if (unlikely(time_after(now,
2161                                                txq->stuck_timer.expires))) {
2162                                /*
2163                                 * The timer should have fired, maybe it is
2164                                 * spinning right now on the lock.
2165                                 */
2166                                goto next_queue;
2167                        }
2168                        /* remember how long until the timer fires */
2169                        txq->frozen_expiry_remainder =
2170                                txq->stuck_timer.expires - now;
2171                        del_timer(&txq->stuck_timer);
2172                        goto next_queue;
2173                }
2174
2175                /*
2176                 * Wake a non-empty queue -> arm timer with the
2177                 * remainder before it froze
2178                 */
2179                mod_timer(&txq->stuck_timer,
2180                          now + txq->frozen_expiry_remainder);
2181
2182next_queue:
2183                spin_unlock_bh(&txq->lock);
2184        }
2185}
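
    /*
     * Worked example (illustrative): if a queue's stuck timer was due to
     * fire 500ms after the freeze, frozen_expiry_remainder stores those
     * 500ms worth of jiffies; on wake the timer is re-armed at
     * jiffies + remainder, so time spent frozen never counts against the
     * watchdog.
     */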
2186
2187static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
2188{
2189        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2190        int i;
2191
2192        for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
2193                struct iwl_txq *txq = trans_pcie->txq[i];
2194
2195                if (i == trans_pcie->cmd_queue)
2196                        continue;
2197
2198                spin_lock_bh(&txq->lock);
2199
2200                if (!block && !(WARN_ON_ONCE(!txq->block))) {
2201                        txq->block--;
2202                        if (!txq->block) {
2203                                iwl_write32(trans, HBUS_TARG_WRPTR,
2204                                            txq->write_ptr | (i << 8));
2205                        }
2206                } else if (block) {
2207                        txq->block++;
2208                }
2209
2210                spin_unlock_bh(&txq->lock);
2211        }
2212}
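
    /*
     * Note (illustrative, from the write above): HBUS_TARG_WRPTR encodes
     * the queue index in bits 8 and up and the TFD write index in the low
     * bits, so a single register write both selects TXQ "i" and kicks DMA
     * at its current write pointer.
     */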
2213
2214#define IWL_FLUSH_WAIT_MS       2000
2215
2216void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
2217{
2218        u32 txq_id = txq->id;
2219        u32 status;
2220        bool active;
2221        u8 fifo;
2222
2223        if (trans->cfg->use_tfh) {
2224                IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
2225                        txq->read_ptr, txq->write_ptr);
2226                /* TODO: access new SCD registers and dump them */
2227                return;
2228        }
2229
2230        status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
2231        fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
2232        active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
2233
2234        IWL_ERR(trans,
2235                "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x%x\n",
2236                txq_id, active ? "" : "in", fifo,
2237                jiffies_to_msecs(txq->wd_timeout),
2238                txq->read_ptr, txq->write_ptr,
2239                iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
2240                        (trans->cfg->base_params->max_tfd_queue_size - 1),
2241                iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
2242                        (trans->cfg->base_params->max_tfd_queue_size - 1),
2243                iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
2244}
2245
2246static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
2247                                       struct iwl_trans_rxq_dma_data *data)
2248{
2249        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2250
2251        if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2252                return -EINVAL;
2253
2254        data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2255        data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2256        data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2257        data->fr_bd_wid = 0;
2258
2259        return 0;
2260}
2261
2262static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
2263{
2264        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2265        struct iwl_txq *txq;
2266        unsigned long now = jiffies;
2267        bool overflow_tx;
2268        u8 wr_ptr;
2269
2270        /* Make sure the NIC is still alive in the bus */
2271        if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2272                return -ENODEV;
2273
2274        if (!test_bit(txq_idx, trans_pcie->queue_used))
2275                return -EINVAL;
2276
2277        IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
2278        txq = trans_pcie->txq[txq_idx];
2279
2280        spin_lock_bh(&txq->lock);
2281        overflow_tx = txq->overflow_tx ||
2282                      !skb_queue_empty(&txq->overflow_q);
2283        spin_unlock_bh(&txq->lock);
2284
2285        wr_ptr = READ_ONCE(txq->write_ptr);
2286
2287        while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
2288                overflow_tx) &&
2289               !time_after(jiffies,
2290                           now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
2291                u8 write_ptr = READ_ONCE(txq->write_ptr);
2292
2293                /*
2294                 * If write pointer moved during the wait, warn only
2295                 * if the TX came from op mode. In case TX came from
2296                 * trans layer (overflow TX) don't warn.
2297                 */
2298                if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
2299                              "WR pointer moved while flushing %d -> %d\n",
2300                              wr_ptr, write_ptr))
2301                        return -ETIMEDOUT;
2302                wr_ptr = write_ptr;
2303
2304                usleep_range(1000, 2000);
2305
2306                spin_lock_bh(&txq->lock);
2307                overflow_tx = txq->overflow_tx ||
2308                              !skb_queue_empty(&txq->overflow_q);
2309                spin_unlock_bh(&txq->lock);
2310        }
2311
2312        if (txq->read_ptr != txq->write_ptr) {
2313                IWL_ERR(trans,
2314                        "failed to flush all TX FIFO queues, Q %d\n", txq_idx);
2315                iwl_trans_pcie_log_scd_error(trans, txq);
2316                return -ETIMEDOUT;
2317        }
2318
2319        IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
2320
2321        return 0;
2322}
2323
2324static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
2325{
2326        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2327        int cnt;
2328        int ret = 0;
2329
2330        /* waiting for all the tx frames to complete might take a while */
2331        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
2333                if (cnt == trans_pcie->cmd_queue)
2334                        continue;
2335                if (!test_bit(cnt, trans_pcie->queue_used))
2336                        continue;
2337                if (!(BIT(cnt) & txq_bm))
2338                        continue;
2339
2340                ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
2341                if (ret)
2342                        break;
2343        }
2344
2345        return ret;
2346}
2347
2348static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2349                                         u32 mask, u32 value)
2350{
2351        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2352        unsigned long flags;
2353
2354        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
2355        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
2356        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
2357}
2358
2359static void iwl_trans_pcie_ref(struct iwl_trans *trans)
2360{
2361        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2362
2363        if (iwlwifi_mod_params.d0i3_disable)
2364                return;
2365
2366        pm_runtime_get(&trans_pcie->pci_dev->dev);
2367
2368#ifdef CONFIG_PM
2369        IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
2370                      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
2371#endif /* CONFIG_PM */
2372}
2373
2374static void iwl_trans_pcie_unref(struct iwl_trans *trans)
2375{
2376        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2377
2378        if (iwlwifi_mod_params.d0i3_disable)
2379                return;
2380
2381        pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
2382        pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
2383
2384#ifdef CONFIG_PM
2385        IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
2386                      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
2387#endif /* CONFIG_PM */
2388}
2389
2390static const char *get_csr_string(int cmd)
2391{
2392#define IWL_CMD(x) case x: return #x
2393        switch (cmd) {
2394        IWL_CMD(CSR_HW_IF_CONFIG_REG);
2395        IWL_CMD(CSR_INT_COALESCING);
2396        IWL_CMD(CSR_INT);
2397        IWL_CMD(CSR_INT_MASK);
2398        IWL_CMD(CSR_FH_INT_STATUS);
2399        IWL_CMD(CSR_GPIO_IN);
2400        IWL_CMD(CSR_RESET);
2401        IWL_CMD(CSR_GP_CNTRL);
2402        IWL_CMD(CSR_HW_REV);
2403        IWL_CMD(CSR_EEPROM_REG);
2404        IWL_CMD(CSR_EEPROM_GP);
2405        IWL_CMD(CSR_OTP_GP_REG);
2406        IWL_CMD(CSR_GIO_REG);
2407        IWL_CMD(CSR_GP_UCODE_REG);
2408        IWL_CMD(CSR_GP_DRIVER_REG);
2409        IWL_CMD(CSR_UCODE_DRV_GP1);
2410        IWL_CMD(CSR_UCODE_DRV_GP2);
2411        IWL_CMD(CSR_LED_REG);
2412        IWL_CMD(CSR_DRAM_INT_TBL_REG);
2413        IWL_CMD(CSR_GIO_CHICKEN_BITS);
2414        IWL_CMD(CSR_ANA_PLL_CFG);
2415        IWL_CMD(CSR_HW_REV_WA_REG);
2416        IWL_CMD(CSR_MONITOR_STATUS_REG);
2417        IWL_CMD(CSR_DBG_HPET_MEM_REG);
2418        default:
2419                return "UNKNOWN";
2420        }
2421#undef IWL_CMD
2422}
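
    /*
     * Note (illustrative): IWL_CMD(x) stringifies its argument, so
     * IWL_CMD(CSR_RESET) expands to:  case CSR_RESET: return "CSR_RESET";
     */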
2423
2424void iwl_pcie_dump_csr(struct iwl_trans *trans)
2425{
2426        int i;
2427        static const u32 csr_tbl[] = {
2428                CSR_HW_IF_CONFIG_REG,
2429                CSR_INT_COALESCING,
2430                CSR_INT,
2431                CSR_INT_MASK,
2432                CSR_FH_INT_STATUS,
2433                CSR_GPIO_IN,
2434                CSR_RESET,
2435                CSR_GP_CNTRL,
2436                CSR_HW_REV,
2437                CSR_EEPROM_REG,
2438                CSR_EEPROM_GP,
2439                CSR_OTP_GP_REG,
2440                CSR_GIO_REG,
2441                CSR_GP_UCODE_REG,
2442                CSR_GP_DRIVER_REG,
2443                CSR_UCODE_DRV_GP1,
2444                CSR_UCODE_DRV_GP2,
2445                CSR_LED_REG,
2446                CSR_DRAM_INT_TBL_REG,
2447                CSR_GIO_CHICKEN_BITS,
2448                CSR_ANA_PLL_CFG,
2449                CSR_MONITOR_STATUS_REG,
2450                CSR_HW_REV_WA_REG,
2451                CSR_DBG_HPET_MEM_REG
2452        };
2453        IWL_ERR(trans, "CSR values:\n");
2454        IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2455                "CSR_INT_PERIODIC_REG)\n");
2456        for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2457                IWL_ERR(trans, "  %25s: 0X%08x\n",
2458                        get_csr_string(csr_tbl[i]),
2459                        iwl_read32(trans, csr_tbl[i]));
2460        }
2461}
2462
2463#ifdef CONFIG_IWLWIFI_DEBUGFS
2464/* create and remove of files */
2465#define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
2466        debugfs_create_file(#name, mode, parent, trans,                 \
2467                            &iwl_dbgfs_##name##_ops);                   \
2468} while (0)
2469
2470/* file operation */
2471#define DEBUGFS_READ_FILE_OPS(name)                                     \
2472static const struct file_operations iwl_dbgfs_##name##_ops = {          \
2473        .read = iwl_dbgfs_##name##_read,                                \
2474        .open = simple_open,                                            \
2475        .llseek = generic_file_llseek,                                  \
2476};
2477
2478#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
2479static const struct file_operations iwl_dbgfs_##name##_ops = {          \
2480        .write = iwl_dbgfs_##name##_write,                              \
2481        .open = simple_open,                                            \
2482        .llseek = generic_file_llseek,                                  \
2483};
2484
2485#define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
2486static const struct file_operations iwl_dbgfs_##name##_ops = {          \
2487        .write = iwl_dbgfs_##name##_write,                              \
2488        .read = iwl_dbgfs_##name##_read,                                \
2489        .open = simple_open,                                            \
2490        .llseek = generic_file_llseek,                                  \
2491};
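
    /*
     * Example (illustrative): DEBUGFS_READ_WRITE_FILE_OPS(interrupt)
     * defines iwl_dbgfs_interrupt_ops wired to iwl_dbgfs_interrupt_read()
     * and iwl_dbgfs_interrupt_write(), and DEBUGFS_ADD_FILE(interrupt,
     * dir, 0600) then creates the "interrupt" debugfs entry with the
     * trans pointer as the file's private data.
     */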
2492
2493static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
2494                                       char __user *user_buf,
2495                                       size_t count, loff_t *ppos)
2496{
2497        struct iwl_trans *trans = file->private_data;
2498        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2499        struct iwl_txq *txq;
2500        char *buf;
2501        int pos = 0;
2502        int cnt;
2503        int ret;
2504        size_t bufsz;
2505
2506        bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
2507
2508        if (!trans_pcie->txq_memory)
2509                return -EAGAIN;
2510
2511        buf = kzalloc(bufsz, GFP_KERNEL);
2512        if (!buf)
2513                return -ENOMEM;
2514
2515        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
2516                txq = trans_pcie->txq[cnt];
2517                pos += scnprintf(buf + pos, bufsz - pos,
2518                                 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
2519                                 cnt, txq->read_ptr, txq->write_ptr,
2520                                 !!test_bit(cnt, trans_pcie->queue_used),
2521                                 !!test_bit(cnt, trans_pcie->queue_stopped),
2522                                 txq->need_update, txq->frozen,
2523                                 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
2524        }
2525        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2526        kfree(buf);
2527        return ret;
2528}
2529
2530static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2531                                       char __user *user_buf,
2532                                       size_t count, loff_t *ppos)
2533{
2534        struct iwl_trans *trans = file->private_data;
2535        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2536        char *buf;
2537        int pos = 0, i, ret;
2538        size_t bufsz;
2539
2540        bufsz = sizeof(char) * 121 * trans->num_rx_queues;
2541
2542        if (!trans_pcie->rxq)
2543                return -EAGAIN;
2544
2545        buf = kzalloc(bufsz, GFP_KERNEL);
2546        if (!buf)
2547                return -ENOMEM;
2548
2549        for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
2550                struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2551
2552                pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2553                                 i);
2554                pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2555                                 rxq->read);
2556                pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2557                                 rxq->write);
2558                pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2559                                 rxq->write_actual);
2560                pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2561                                 rxq->need_update);
2562                pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2563                                 rxq->free_count);
2564                if (rxq->rb_stts) {
2565                        u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
2566                                                                     rxq));
2567                        pos += scnprintf(buf + pos, bufsz - pos,
2568                                         "\tclosed_rb_num: %u\n",
2569                                         r & 0x0FFF);
2570                } else {
2571                        pos += scnprintf(buf + pos, bufsz - pos,
2572                                         "\tclosed_rb_num: Not Allocated\n");
2573                }
2574        }
2575        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2576        kfree(buf);
2577
2578        return ret;
2579}
2580
2581static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2582                                        char __user *user_buf,
2583                                        size_t count, loff_t *ppos)
2584{
2585        struct iwl_trans *trans = file->private_data;
2586        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2587        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2588        int pos = 0;
2590        char *buf;
2591        int bufsz = 24 * 64; /* 24 items * 64 char per item */
2592        ssize_t ret;
2593
2594        buf = kzalloc(bufsz, GFP_KERNEL);
2595        if (!buf)
2596                return -ENOMEM;
2597
2598        pos += scnprintf(buf + pos, bufsz - pos,
2599                        "Interrupt Statistics Report:\n");
2600
2601        pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
2602                isr_stats->hw);
2603        pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
2604                isr_stats->sw);
2605        if (isr_stats->sw || isr_stats->hw) {
2606                pos += scnprintf(buf + pos, bufsz - pos,
2607                        "\tLast Restarting Code:  0x%X\n",
2608                        isr_stats->err_code);
2609        }
2610#ifdef CONFIG_IWLWIFI_DEBUG
2611        pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
2612                isr_stats->sch);
2613        pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
2614                isr_stats->alive);
2615#endif
2616        pos += scnprintf(buf + pos, bufsz - pos,
2617                "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
2618
2619        pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
2620                isr_stats->ctkill);
2621
2622        pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
2623                isr_stats->wakeup);
2624
2625        pos += scnprintf(buf + pos, bufsz - pos,
2626                "Rx command responses:\t\t %u\n", isr_stats->rx);
2627
2628        pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
2629                isr_stats->tx);
2630
2631        pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
2632                isr_stats->unhandled);
2633
2634        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2635        kfree(buf);
2636        return ret;
2637}
2638
2639static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2640                                         const char __user *user_buf,
2641                                         size_t count, loff_t *ppos)
2642{
2643        struct iwl_trans *trans = file->private_data;
2644        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2645        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2646        u32 reset_flag;
2647        int ret;
2648
2649        ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
2650        if (ret)
2651                return ret;
2652        if (reset_flag == 0)
2653                memset(isr_stats, 0, sizeof(*isr_stats));
2654
2655        return count;
2656}
2657
2658static ssize_t iwl_dbgfs_csr_write(struct file *file,
2659                                   const char __user *user_buf,
2660                                   size_t count, loff_t *ppos)
2661{
2662        struct iwl_trans *trans = file->private_data;
2663
2664        iwl_pcie_dump_csr(trans);
2665
2666        return count;
2667}
2668
2669static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2670                                     char __user *user_buf,
2671                                     size_t count, loff_t *ppos)
2672{
2673        struct iwl_trans *trans = file->private_data;
2674        char *buf = NULL;
2675        ssize_t ret;
2676
2677        ret = iwl_dump_fh(trans, &buf);
2678        if (ret < 0)
2679                return ret;
2680        if (!buf)
2681                return -EINVAL;
2682        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2683        kfree(buf);
2684        return ret;
2685}
2686
2687static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
2688                                     char __user *user_buf,
2689                                     size_t count, loff_t *ppos)
2690{
2691        struct iwl_trans *trans = file->private_data;
2692        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2693        char buf[100];
2694        int pos;
2695
2696        pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
2697                        trans_pcie->debug_rfkill,
2698                        !(iwl_read32(trans, CSR_GP_CNTRL) &
2699                                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
2700
2701        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2702}
2703
2704static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
2705                                      const char __user *user_buf,
2706                                      size_t count, loff_t *ppos)
2707{
2708        struct iwl_trans *trans = file->private_data;
2709        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2710        bool new_value;
2711        int ret;
2712
2713        ret = kstrtobool_from_user(user_buf, count, &new_value);
2714        if (ret)
2715                return ret;
2716        if (new_value == trans_pcie->debug_rfkill)
2717                return count;
2718        IWL_WARN(trans, "changing debug rfkill %d->%d\n",
2719                 trans_pcie->debug_rfkill, new_value);
2720        trans_pcie->debug_rfkill = new_value;
2721        iwl_pcie_handle_rfkill_irq(trans);
2722
2723        return count;
2724}
2725
2726static int iwl_dbgfs_monitor_data_open(struct inode *inode,
2727                                       struct file *file)
2728{
2729        struct iwl_trans *trans = inode->i_private;
2730        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2731
2732        if (!trans->dbg_dest_tlv ||
2733            trans->dbg_dest_tlv->monitor_mode != EXTERNAL_MODE) {
2734                IWL_ERR(trans, "Debug destination is not set to DRAM\n");
2735                return -ENOENT;
2736        }
2737
2738        if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
2739                return -EBUSY;
2740
2741        trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
2742        return simple_open(inode, file);
2743}
2744
2745static int iwl_dbgfs_monitor_data_release(struct inode *inode,
2746                                          struct file *file)
2747{
2748        struct iwl_trans_pcie *trans_pcie =
2749                IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
2750
2751        if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
2752                trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
2753        return 0;
2754}
2755
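/*
 * Copy up to *size bytes from buf to user_buf, capping *size to the
 * DW-aligned space still left in the user buffer.  On return, *size is
 * the number of bytes actually copied and *bytes_copied is advanced by
 * that amount.  Returns true when the user buffer is full and no more
 * chunks should be copied in this read call.
 */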
2756static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
2757                                  void *buf, ssize_t *size,
2758                                  ssize_t *bytes_copied)
2759{
2760        int buf_size_left = count - *bytes_copied;
2761
2762        buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2763        if (*size > buf_size_left)
2764                *size = buf_size_left;
2765
2766        *size -= copy_to_user(user_buf, buf, *size);
2767        *bytes_copied += *size;
2768
2769        if (buf_size_left == *size)
2770                return true;
2771        return false;
2772}
2773
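/*
 * Read the DRAM firmware monitor relative to the position saved from the
 * previous read.  Three cases are handled: the write pointer is still in
 * the same wrap cycle (copy the delta), the buffer wrapped exactly once
 * (copy the tail, then the head), or the reader fell too far behind
 * (warn and restart from the beginning of the buffer).
 */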
2774static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
2775                                           char __user *user_buf,
2776                                           size_t count, loff_t *ppos)
2777{
2778        struct iwl_trans *trans = file->private_data;
2779        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2780        void *cpu_addr = (void *)trans->fw_mon[0].block, *curr_buf;
2781        struct cont_rec *data = &trans_pcie->fw_mon_data;
2782        u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
2783        ssize_t size, bytes_copied = 0;
2784        bool b_full;
2785
2786        if (trans->dbg_dest_tlv) {
2787                write_ptr_addr =
2788                        le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
2789                wrap_cnt_addr = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
2790        } else {
2791                write_ptr_addr = MON_BUFF_WRPTR;
2792                wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
2793        }
2794
2795        if (unlikely(!trans->dbg_rec_on))
2796                return 0;
2797
2798        mutex_lock(&data->mutex);
2799        if (data->state ==
2800            IWL_FW_MON_DBGFS_STATE_DISABLED) {
2801                mutex_unlock(&data->mutex);
2802                return 0;
2803        }
2804
2805        /* write_ptr position in bytes rather than DW */
2806        write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
2807        wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
2808
2809        if (data->prev_wrap_cnt == wrap_cnt) {
2810                size = write_ptr - data->prev_wr_ptr;
2811                curr_buf = cpu_addr + data->prev_wr_ptr;
2812                b_full = iwl_write_to_user_buf(user_buf, count,
2813                                               curr_buf, &size,
2814                                               &bytes_copied);
2815                data->prev_wr_ptr += size;
2816
2817        } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2818                   write_ptr < data->prev_wr_ptr) {
2819                size = trans->fw_mon[0].size - data->prev_wr_ptr;
2820                curr_buf = cpu_addr + data->prev_wr_ptr;
2821                b_full = iwl_write_to_user_buf(user_buf, count,
2822                                               curr_buf, &size,
2823                                               &bytes_copied);
2824                data->prev_wr_ptr += size;
2825
2826                if (!b_full) {
2827                        size = write_ptr;
2828                        b_full = iwl_write_to_user_buf(user_buf, count,
2829                                                       cpu_addr, &size,
2830                                                       &bytes_copied);
2831                        data->prev_wr_ptr = size;
2832                        data->prev_wrap_cnt++;
2833                }
2834        } else {
2835                if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2836                    write_ptr > data->prev_wr_ptr)
2837                        IWL_WARN(trans,
2838                                 "write pointer passed previous write pointer, start copying from the beginning\n");
2839                else if (likely(!(data->prev_wrap_cnt == 0 &&
2840                                  data->prev_wr_ptr == 0)))
2841                        IWL_WARN(trans,
2842                                 "monitor data is out of sync, start copying from the beginning\n");
2843
2844                size = write_ptr;
2845                b_full = iwl_write_to_user_buf(user_buf, count,
2846                                               cpu_addr, &size,
2847                                               &bytes_copied);
2848                data->prev_wr_ptr = size;
2849                data->prev_wrap_cnt = wrap_cnt;
2850        }
2851
2852        mutex_unlock(&data->mutex);
2853
2854        return bytes_copied;
2855}
2856
2857DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2858DEBUGFS_READ_FILE_OPS(fh_reg);
2859DEBUGFS_READ_FILE_OPS(rx_queue);
2860DEBUGFS_READ_FILE_OPS(tx_queue);
2861DEBUGFS_WRITE_FILE_OPS(csr);
2862DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
2863
2864static const struct file_operations iwl_dbgfs_monitor_data_ops = {
2865        .read = iwl_dbgfs_monitor_data_read,
2866        .open = iwl_dbgfs_monitor_data_open,
2867        .release = iwl_dbgfs_monitor_data_release,
2868};
2869
2870/* Create the debugfs files and directories */
2871void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
2872{
2873        struct dentry *dir = trans->dbgfs_dir;
2874
2875        DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
2876        DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
2877        DEBUGFS_ADD_FILE(interrupt, dir, 0600);
2878        DEBUGFS_ADD_FILE(csr, dir, 0200);
2879        DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
2880        DEBUGFS_ADD_FILE(rfkill, dir, 0600);
2881        DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
2882}
2883
2884static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
2885{
2886        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2887        struct cont_rec *data = &trans_pcie->fw_mon_data;
2888
2889        mutex_lock(&data->mutex);
2890        data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
2891        mutex_unlock(&data->mutex);
2892}
2893#endif /*CONFIG_IWLWIFI_DEBUGFS */
2894
2895static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
2896{
2897        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2898        u32 cmdlen = 0;
2899        int i;
2900
2901        for (i = 0; i < trans_pcie->max_tbs; i++)
2902                cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
2903
2904        return cmdlen;
2905}
2906
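/*
 * Dump the RBs that were filled by the HW but not yet processed by the
 * driver.  Each page is DMA-unmapped so the CPU sees up-to-date data,
 * copied into the dump, and then remapped so normal RX processing and
 * freeing can continue.
 */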
2907static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
2908                                   struct iwl_fw_error_dump_data **data,
2909                                   int allocated_rb_nums)
2910{
2911        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2912        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
2913        /* Dump RBs is supported only for pre-9000 devices (1 queue) */
2914        struct iwl_rxq *rxq = &trans_pcie->rxq[0];
2915        u32 i, r, j, rb_len = 0;
2916
2917        spin_lock(&rxq->lock);
2918
2919        r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
2920
2921        for (i = rxq->read, j = 0;
2922             i != r && j < allocated_rb_nums;
2923             i = (i + 1) & RX_QUEUE_MASK, j++) {
2924                struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
2925                struct iwl_fw_error_dump_rb *rb;
2926
2927                dma_unmap_page(trans->dev, rxb->page_dma, max_len,
2928                               DMA_FROM_DEVICE);
2929
2930                rb_len += sizeof(**data) + sizeof(*rb) + max_len;
2931
2932                (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
2933                (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
2934                rb = (void *)(*data)->data;
2935                rb->index = cpu_to_le32(i);
2936                memcpy(rb->data, page_address(rxb->page), max_len);
2937                /* remap the page so it can be used and freed normally later */
2938                rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
2939                                                     max_len,
2940                                                     DMA_FROM_DEVICE);
2941
2942                *data = iwl_fw_error_next_data(*data);
2943        }
2944
2945        spin_unlock(&rxq->lock);
2946
2947        return rb_len;
2948}
2949#define IWL_CSR_TO_DUMP (0x250)
2950
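/* Dump the first IWL_CSR_TO_DUMP bytes of CSR space as LE32 values. */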
2951static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
2952                                   struct iwl_fw_error_dump_data **data)
2953{
2954        u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
2955        __le32 *val;
2956        int i;
2957
2958        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
2959        (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
2960        val = (void *)(*data)->data;
2961
2962        for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
2963                *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2964
2965        *data = iwl_fw_error_next_data(*data);
2966
2967        return csr_len;
2968}
2969
2970static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
2971                                       struct iwl_fw_error_dump_data **data)
2972{
2973        u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
2974        unsigned long flags;
2975        __le32 *val;
2976        int i;
2977
2978        if (!iwl_trans_grab_nic_access(trans, &flags))
2979                return 0;
2980
2981        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
2982        (*data)->len = cpu_to_le32(fh_regs_len);
2983        val = (void *)(*data)->data;
2984
2985        if (!trans->cfg->gen2)
2986                for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
2987                     i += sizeof(u32))
2988                        *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2989        else
2990                for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
2991                     i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
2992                     i += sizeof(u32))
2993                        *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
2994                                                                      i));
2995
2996        iwl_trans_release_nic_access(trans, &flags);
2997
2998        *data = iwl_fw_error_next_data(*data);
2999
3000        return sizeof(**data) + fh_regs_len;
3001}
3002
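/*
 * Read the MARBH monitor through its read-data register: enable the read
 * DMA, read the buffer one DW at a time, then disable it again.
 */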
3003static u32
3004iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
3005                                 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
3006                                 u32 monitor_len)
3007{
3008        u32 buf_size_in_dwords = (monitor_len >> 2);
3009        u32 *buffer = (u32 *)fw_mon_data->data;
3010        unsigned long flags;
3011        u32 i;
3012
3013        if (!iwl_trans_grab_nic_access(trans, &flags))
3014                return 0;
3015
3016        iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
3017        for (i = 0; i < buf_size_in_dwords; i++)
3018                buffer[i] = iwl_read_umac_prph_no_grab(trans,
3019                                                       MON_DMARB_RD_DATA_ADDR);
3020        iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
3021
3022        iwl_trans_release_nic_access(trans, &flags);
3023
3024        return monitor_len;
3025}
3026
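/*
 * Fill in the monitor base/write-pointer/wrap-count fields of the dump,
 * picking the register set that matches the device family and debug
 * configuration (AX210+, ini-style debug, legacy dest TLV, or defaults).
 */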
3027static void
3028iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3029                             struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3030{
3031        u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3032
3033        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3034                base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3035                base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3036                write_ptr = DBGC_CUR_DBGBUF_STATUS;
3037                wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3038        } else if (trans->ini_valid) {
3039                base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2);
3040                write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2);
3041                wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2);
3042        } else if (trans->dbg_dest_tlv) {
3043                write_ptr = le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
3044                wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
3045                base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
3046        } else {
3047                base = MON_BUFF_BASE_ADDR;
3048                write_ptr = MON_BUFF_WRPTR;
3049                wrap_cnt = MON_BUFF_CYCLE_CNT;
3050        }
3051
3052        write_ptr_val = iwl_read_prph(trans, write_ptr);
3053        fw_mon_data->fw_mon_cycle_cnt =
3054                cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3055        fw_mon_data->fw_mon_base_ptr =
3056                cpu_to_le32(iwl_read_prph(trans, base));
3057        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3058                fw_mon_data->fw_mon_base_high_ptr =
3059                        cpu_to_le32(iwl_read_prph(trans, base_high));
3060                write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3061        }
3062        fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3063}
3064
3065static u32
3066iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3067                            struct iwl_fw_error_dump_data **data,
3068                            u32 monitor_len)
3069{
3070        u32 len = 0;
3071
3072        if ((trans->num_blocks &&
3073             (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3074              trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210 ||
3075              trans->ini_valid)) ||
3076            (trans->dbg_dest_tlv && !trans->ini_valid)) {
3077                struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3078
3079                (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3080                fw_mon_data = (void *)(*data)->data;
3081
3082                iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3083
3084                len += sizeof(**data) + sizeof(*fw_mon_data);
3085                if (trans->num_blocks) {
3086                        memcpy(fw_mon_data->data,
3087                               trans->fw_mon[0].block,
3088                               trans->fw_mon[0].size);
3089
3090                        monitor_len = trans->fw_mon[0].size;
3091                } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
3092                        u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3093                        /*
3094                         * Update pointers to reflect actual values after
3095                         * shifting
3096                         */
3097                        if (trans->dbg_dest_tlv->version) {
3098                                base = (iwl_read_prph(trans, base) &
3099                                        IWL_LDBG_M2S_BUF_BA_MSK) <<
3100                                       trans->dbg_dest_tlv->base_shift;
3101                                base *= IWL_M2S_UNIT_SIZE;
3102                                base += trans->cfg->smem_offset;
3103                        } else {
3104                                base = iwl_read_prph(trans, base) <<
3105                                       trans->dbg_dest_tlv->base_shift;
3106                        }
3107
3108                        iwl_trans_read_mem(trans, base, fw_mon_data->data,
3109                                           monitor_len / sizeof(u32));
3110                } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
3111                        monitor_len =
3112                                iwl_trans_pci_dump_marbh_monitor(trans,
3113                                                                 fw_mon_data,
3114                                                                 monitor_len);
3115                } else {
3116                        /* Didn't match anything - output no monitor data */
3117                        monitor_len = 0;
3118                }
3119
3120                len += monitor_len;
3121                (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3122        }
3123
3124        return len;
3125}
3126
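/*
 * Account for the FW monitor in the dump: add the worst-case section
 * size to *len and return the length of the monitor data itself, or 0
 * when no monitor is configured.
 */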
3127static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3128{
3129        if (trans->num_blocks) {
3130                *len += sizeof(struct iwl_fw_error_dump_data) +
3131                        sizeof(struct iwl_fw_error_dump_fw_mon) +
3132                        trans->fw_mon[0].size;
3133                return trans->fw_mon[0].size;
3134        } else if (trans->dbg_dest_tlv) {
3135                u32 base, end, cfg_reg, monitor_len;
3136
3137                if (trans->dbg_dest_tlv->version == 1) {
3138                        cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
3139                        cfg_reg = iwl_read_prph(trans, cfg_reg);
3140                        base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3141                                trans->dbg_dest_tlv->base_shift;
3142                        base *= IWL_M2S_UNIT_SIZE;
3143                        base += trans->cfg->smem_offset;
3144
3145                        monitor_len =
3146                                (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3147                                trans->dbg_dest_tlv->end_shift;
3148                        monitor_len *= IWL_M2S_UNIT_SIZE;
3149                } else {
3150                        base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
3151                        end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
3152
3153                        base = iwl_read_prph(trans, base) <<
3154                               trans->dbg_dest_tlv->base_shift;
3155                        end = iwl_read_prph(trans, end) <<
3156                              trans->dbg_dest_tlv->end_shift;
3157
3158                        /* Make "end" point to the actual end */
3159                        if (trans->cfg->device_family >=
3160                            IWL_DEVICE_FAMILY_8000 ||
3161                            trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
3162                                end += (1 << trans->dbg_dest_tlv->end_shift);
3163                        monitor_len = end - base;
3164                }
3165                *len += sizeof(struct iwl_fw_error_dump_data) +
3166                        sizeof(struct iwl_fw_error_dump_fw_mon) +
3167                        monitor_len;
3168                return monitor_len;
3169        }
3170        return 0;
3171}
3172
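/*
 * Build the transport part of the FW error dump in two passes: first
 * compute a worst-case length for everything requested in dump_mask and
 * vzalloc() a buffer of that size, then fill in each section and record
 * the length actually used.
 */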
3173static struct iwl_trans_dump_data *
3174iwl_trans_pcie_dump_data(struct iwl_trans *trans,
3175                         u32 dump_mask)
3176{
3177        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3178        struct iwl_fw_error_dump_data *data;
3179        struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
3180        struct iwl_fw_error_dump_txcmd *txcmd;
3181        struct iwl_trans_dump_data *dump_data;
3182        u32 len, num_rbs = 0, monitor_len = 0;
3183        int i, ptr;
3184        bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3185                        !trans->cfg->mq_rx_supported &&
3186                        dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3187
3188        if (!dump_mask)
3189                return NULL;
3190
3191        /* transport dump header */
3192        len = sizeof(*dump_data);
3193
3194        /* host commands */
3195        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD))
3196                len += sizeof(*data) +
3197                        cmdq->n_window * (sizeof(*txcmd) +
3198                                          TFD_MAX_PAYLOAD_SIZE);
3199
3200        /* FW monitor */
3201        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3202                monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3203
3204        /* CSR registers */
3205        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3206                len += sizeof(*data) + IWL_CSR_TO_DUMP;
3207
3208        /* FH registers */
3209        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3210                if (trans->cfg->gen2)
3211                        len += sizeof(*data) +
3212                               (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3213                                iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3214                else
3215                        len += sizeof(*data) +
3216                               (FH_MEM_UPPER_BOUND -
3217                                FH_MEM_LOWER_BOUND);
3218        }
3219
3220        if (dump_rbs) {
3221                /* Dump RBs is supported only for pre-9000 devices (1 queue) */
3222                struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3223                /* RBs */
3224                num_rbs =
3225                        le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
3226                        & 0x0FFF;
3227                num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3228                len += num_rbs * (sizeof(*data) +
3229                                  sizeof(struct iwl_fw_error_dump_rb) +
3230                                  (PAGE_SIZE << trans_pcie->rx_page_order));
3231        }
3232
3233        /* Paged memory for gen2 HW */
3234        if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3235                for (i = 0; i < trans->init_dram.paging_cnt; i++)
3236                        len += sizeof(*data) +
3237                               sizeof(struct iwl_fw_error_dump_paging) +
3238                               trans->init_dram.paging[i].size;
3239
3240        dump_data = vzalloc(len);
3241        if (!dump_data)
3242                return NULL;
3243
3244        len = 0;
3245        data = (void *)dump_data->data;
3246
3247        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
3248                u16 tfd_size = trans_pcie->tfd_size;
3249
3250                data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3251                txcmd = (void *)data->data;
3252                spin_lock_bh(&cmdq->lock);
3253                ptr = cmdq->write_ptr;
3254                for (i = 0; i < cmdq->n_window; i++) {
3255                        u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
3256                        u32 caplen, cmdlen;
3257
3258                        cmdlen = iwl_trans_pcie_get_cmdlen(trans,
3259                                                           cmdq->tfds +
3260                                                           tfd_size * ptr);
3261                        caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
3262
3263                        if (cmdlen) {
3264                                len += sizeof(*txcmd) + caplen;
3265                                txcmd->cmdlen = cpu_to_le32(cmdlen);
3266                                txcmd->caplen = cpu_to_le32(caplen);
3267                                memcpy(txcmd->data, cmdq->entries[idx].cmd,
3268                                       caplen);
3269                                txcmd = (void *)((u8 *)txcmd->data + caplen);
3270                        }
3271
3272                        ptr = iwl_queue_dec_wrap(trans, ptr);
3273                }
3274                spin_unlock_bh(&cmdq->lock);
3275
3276                data->len = cpu_to_le32(len);
3277                len += sizeof(*data);
3278                data = iwl_fw_error_next_data(data);
3279        }
3280
3281        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3282                len += iwl_trans_pcie_dump_csr(trans, &data);
3283        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
3284                len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3285        if (dump_rbs)
3286                len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
3287
3288        /* Paged memory for gen2 HW */
3289        if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
3290                for (i = 0; i < trans->init_dram.paging_cnt; i++) {
3291                        struct iwl_fw_error_dump_paging *paging;
3292                        u32 page_len = trans->init_dram.paging[i].size;
3293
3294                        data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
3295                        data->len = cpu_to_le32(sizeof(*paging) + page_len);
3296                        paging = (void *)data->data;
3297                        paging->index = cpu_to_le32(i);
3298                        memcpy(paging->data,
3299                               trans->init_dram.paging[i].block, page_len);
3300                        data = iwl_fw_error_next_data(data);
3301
3302                        len += sizeof(*data) + sizeof(*paging) + page_len;
3303                }
3304        }
3305        if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3306                len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
3307
3308        dump_data->len = len;
3309
3310        return dump_data;
3311}
3312
3313#ifdef CONFIG_PM_SLEEP
3314static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
3315{
3316        if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
3317            trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
3318                return iwl_pci_fw_enter_d0i3(trans);
3319
3320        return 0;
3321}
3322
3323static void iwl_trans_pcie_resume(struct iwl_trans *trans)
3324{
3325        if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
3326            trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
3327                iwl_pci_fw_exit_d0i3(trans);
3328}
3329#endif /* CONFIG_PM_SLEEP */
3330
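/*
 * Ops shared between the gen1 and gen2 transports.  Note that
 * IWL_TRANS_PM_OPS carries its own trailing comma (or expands to nothing
 * without CONFIG_PM_SLEEP), which is why no comma follows it below.
 */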
3331#define IWL_TRANS_COMMON_OPS                                            \
3332        .op_mode_leave = iwl_trans_pcie_op_mode_leave,                  \
3333        .write8 = iwl_trans_pcie_write8,                                \
3334        .write32 = iwl_trans_pcie_write32,                              \
3335        .read32 = iwl_trans_pcie_read32,                                \
3336        .read_prph = iwl_trans_pcie_read_prph,                          \
3337        .write_prph = iwl_trans_pcie_write_prph,                        \
3338        .read_mem = iwl_trans_pcie_read_mem,                            \
3339        .write_mem = iwl_trans_pcie_write_mem,                          \
3340        .configure = iwl_trans_pcie_configure,                          \
3341        .set_pmi = iwl_trans_pcie_set_pmi,                              \
3342        .sw_reset = iwl_trans_pcie_sw_reset,                            \
3343        .grab_nic_access = iwl_trans_pcie_grab_nic_access,              \
3344        .release_nic_access = iwl_trans_pcie_release_nic_access,        \
3345        .set_bits_mask = iwl_trans_pcie_set_bits_mask,                  \
3346        .ref = iwl_trans_pcie_ref,                                      \
3347        .unref = iwl_trans_pcie_unref,                                  \
3348        .dump_data = iwl_trans_pcie_dump_data,                          \
3349        .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
3350        .d3_resume = iwl_trans_pcie_d3_resume,                          \
3351        .sync_nmi = iwl_trans_pcie_sync_nmi
3352
3353#ifdef CONFIG_PM_SLEEP
3354#define IWL_TRANS_PM_OPS                                                \
3355        .suspend = iwl_trans_pcie_suspend,                              \
3356        .resume = iwl_trans_pcie_resume,
3357#else
3358#define IWL_TRANS_PM_OPS
3359#endif /* CONFIG_PM_SLEEP */
3360
3361static const struct iwl_trans_ops trans_ops_pcie = {
3362        IWL_TRANS_COMMON_OPS,
3363        IWL_TRANS_PM_OPS
3364        .start_hw = iwl_trans_pcie_start_hw,
3365        .fw_alive = iwl_trans_pcie_fw_alive,
3366        .start_fw = iwl_trans_pcie_start_fw,
3367        .stop_device = iwl_trans_pcie_stop_device,
3368
3369        .send_cmd = iwl_trans_pcie_send_hcmd,
3370
3371        .tx = iwl_trans_pcie_tx,
3372        .reclaim = iwl_trans_pcie_reclaim,
3373
3374        .txq_disable = iwl_trans_pcie_txq_disable,
3375        .txq_enable = iwl_trans_pcie_txq_enable,
3376
3377        .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
3378
3379        .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
3380
3381        .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
3382        .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
3383#ifdef CONFIG_IWLWIFI_DEBUGFS
3384        .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3385#endif
3386};
3387
3388static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
3389        IWL_TRANS_COMMON_OPS,
3390        IWL_TRANS_PM_OPS
3391        .start_hw = iwl_trans_pcie_start_hw,
3392        .fw_alive = iwl_trans_pcie_gen2_fw_alive,
3393        .start_fw = iwl_trans_pcie_gen2_start_fw,
3394        .stop_device = iwl_trans_pcie_gen2_stop_device,
3395
3396        .send_cmd = iwl_trans_pcie_gen2_send_hcmd,
3397
3398        .tx = iwl_trans_pcie_gen2_tx,
3399        .reclaim = iwl_trans_pcie_reclaim,
3400
3401        .txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
3402        .txq_free = iwl_trans_pcie_dyn_txq_free,
3403        .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
3404        .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
3405#ifdef CONFIG_IWLWIFI_DEBUGFS
3406        .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3407#endif
3408};
3409
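/*
 * Allocate and initialize the PCIe transport: enable the PCI device,
 * pick gen1 vs. gen2 ops from the config, set up the DMA masks and the
 * BAR0 mapping, detect the HW revision (including the 8000-family step
 * work-around), and register interrupt handling (MSI-X or INTA + ICT).
 */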
3410struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3411                                       const struct pci_device_id *ent,
3412                                       const struct iwl_cfg *cfg)
3413{
3414        struct iwl_trans_pcie *trans_pcie;
3415        struct iwl_trans *trans;
3416        int ret, addr_size;
3417
3418        ret = pcim_enable_device(pdev);
3419        if (ret)
3420                return ERR_PTR(ret);
3421
3422        if (cfg->gen2)
3423                trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
3424                                        &pdev->dev, cfg, &trans_ops_pcie_gen2);
3425        else
3426                trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
3427                                        &pdev->dev, cfg, &trans_ops_pcie);
3428        if (!trans)
3429                return ERR_PTR(-ENOMEM);
3430
3431        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3432
3433        trans_pcie->trans = trans;
3434        trans_pcie->opmode_down = true;
3435        spin_lock_init(&trans_pcie->irq_lock);
3436        spin_lock_init(&trans_pcie->reg_lock);
3437        mutex_init(&trans_pcie->mutex);
3438        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3439        trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
3440        if (!trans_pcie->tso_hdr_page) {
3441                ret = -ENOMEM;
3442                goto out_no_pci;
3443        }
3444        trans_pcie->debug_rfkill = -1;
3445
3446        if (!cfg->base_params->pcie_l1_allowed) {
3447                /*
3448                 * W/A - seems to solve weird behavior. Disabling these link
3449                 * states keeps the link out of L1 all the time, which wastes
3450                 * a lot of power; remove once the underlying issue is understood.
3451                 */
3452                pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
3453                                       PCIE_LINK_STATE_L1 |
3454                                       PCIE_LINK_STATE_CLKPM);
3455        }
3456
3457        trans_pcie->def_rx_queue = 0;
3458
3459        if (cfg->use_tfh) {
3460                addr_size = 64;
3461                trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
3462                trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
3463        } else {
3464                addr_size = 36;
3465                trans_pcie->max_tbs = IWL_NUM_OF_TBS;
3466                trans_pcie->tfd_size = sizeof(struct iwl_tfd);
3467        }
3468        trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
3469
3470        pci_set_master(pdev);
3471
3472        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
3473        if (!ret)
3474                ret = pci_set_consistent_dma_mask(pdev,
3475                                                  DMA_BIT_MASK(addr_size));
3476        if (ret) {
3477                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3478                if (!ret)
3479                        ret = pci_set_consistent_dma_mask(pdev,
3480                                                          DMA_BIT_MASK(32));
3481                /* both attempts failed: */
3482                if (ret) {
3483                        dev_err(&pdev->dev, "No suitable DMA available\n");
3484                        goto out_no_pci;
3485                }
3486        }
3487
3488        ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
3489        if (ret) {
3490                dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
3491                goto out_no_pci;
3492        }
3493
3494        trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
3495        if (!trans_pcie->hw_base) {
3496                dev_err(&pdev->dev, "pcim_iomap_table failed\n");
3497                ret = -ENODEV;
3498                goto out_no_pci;
3499        }
3500
3501        /* We disable the RETRY_TIMEOUT register (0x41) to keep
3502         * PCI Tx retries from interfering with C3 CPU state */
3503        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3504
3505        trans_pcie->pci_dev = pdev;
3506        iwl_disable_interrupts(trans);
3507
3508        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
3509        if (trans->hw_rev == 0xffffffff) {
3510                dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
3511                ret = -EIO;
3512                goto out_no_pci;
3513        }
3514
3515        /*
3516         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
3517         * changed, and the revision step now also includes bits 0-1 (no more
3518         * "dash" value). To keep hw_rev backwards compatible, we store it
3519         * in the old format.
3520         */
3521        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
3522                unsigned long flags;
3523
3524                trans->hw_rev = (trans->hw_rev & 0xfff0) |
3525                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
3526
3527                ret = iwl_pcie_prepare_card_hw(trans);
3528                if (ret) {
3529                        IWL_WARN(trans, "Exit HW not ready\n");
3530                        goto out_no_pci;
3531                }
3532
3533                /*
3534                 * In order to recognize the C step, the driver should read the
3535                 * chip version ID located in the AUX bus MISC address space.
3536                 */
3537                ret = iwl_finish_nic_init(trans);
3538                if (ret)
3539                        goto out_no_pci;
3540
3541                if (iwl_trans_grab_nic_access(trans, &flags)) {
3542                        u32 hw_step;
3543
3544                        hw_step = iwl_read_umac_prph_no_grab(trans,
3545                                                             WFPM_CTRL_REG);
3546                        hw_step |= ENABLE_WFPM;
3547                        iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG,
3548                                                    hw_step);
3549                        hw_step = iwl_read_prph_no_grab(trans,
3550                                                        CNVI_AUX_MISC_CHIP);
3551                        hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
3552                        if (hw_step == 0x3)
3553                                trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
3554                                                (SILICON_C_STEP << 2);
3555                        iwl_trans_release_nic_access(trans, &flags);
3556                }
3557        }
3558
3559        IWL_DEBUG_INFO(trans, "HW REV: 0x%x\n", trans->hw_rev);
3560
3561#if IS_ENABLED(CONFIG_IWLMVM)
3562        trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
3563
3564        if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
3565                if (trans->hw_rev == CSR_HW_REV_TYPE_TY) {
3566                        trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
3567                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3568                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
3569                        trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
3570                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3571                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
3572                        trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
3573                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3574                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
3575                        trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0;
3576                }
3577        } else if (cfg == &iwl_ax101_cfg_qu_hr) {
3578                if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3579                    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
3580                    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
3581                        trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
3582                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3583                    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
3584                        trans->cfg = &iwl_ax101_cfg_qu_hr;
3585                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3586                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
3587                        trans->cfg = &iwl22000_2ax_cfg_jf;
3588                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3589                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
3590                        IWL_ERR(trans, "RF ID HRCDB is not supported\n");
3591                        ret = -EINVAL;
3592                        goto out_no_pci;
3593                } else {
3594                        IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n",
3595                                CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id));
3596                        ret = -EINVAL;
3597                        goto out_no_pci;
3598                }
3599        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3600                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
3601                   ((trans->cfg != &iwl_ax200_cfg_cc &&
3602                    trans->cfg != &killer1650x_2ax_cfg &&
3603                    trans->cfg != &killer1650w_2ax_cfg) ||
3604                    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
3605                u32 hw_status;
3606
3607                hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
3608                if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP) {
3609                        /*
3610                         * B step FW is the same for physical card and FPGA
3611                         */
3612                        trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
3613                } else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
3614                           CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
3615                        trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
3616                } else {
3617                        /*
3618                         * A step, no FPGA
3619                         */
3620                        trans->cfg = &iwl22000_2ac_cfg_hr;
3621                }
3622        }
3623#endif
3624
3625        iwl_pcie_set_interrupt_capa(pdev, trans);
3626        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
3627        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
3628                 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
3629
3630        /* Initialize the wait queue for commands */
3631        init_waitqueue_head(&trans_pcie->wait_command_queue);
3632
3633        init_waitqueue_head(&trans_pcie->d0i3_waitq);
3634
3635        if (trans_pcie->msix_enabled) {
3636                ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
3637                if (ret)
3638                        goto out_no_pci;
3639        } else {
3640                ret = iwl_pcie_alloc_ict(trans);
3641                if (ret)
3642                        goto out_no_pci;
3643
3644                ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
3645                                                iwl_pcie_isr,
3646                                                iwl_pcie_irq_handler,
3647                                                IRQF_SHARED, DRV_NAME, trans);
3648                if (ret) {
3649                        IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
3650                        goto out_free_ict;
3651                }
3652                trans_pcie->inta_mask = CSR_INI_SET_MASK;
3653        }
3654
3655        trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3656                                                   WQ_HIGHPRI | WQ_UNBOUND, 1);
3657        INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3658
3659#ifdef CONFIG_IWLWIFI_PCIE_RTPM
3660        trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
3661#else
3662        trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
3663#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
3664
3665#ifdef CONFIG_IWLWIFI_DEBUGFS
3666        trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3667        mutex_init(&trans_pcie->fw_mon_data.mutex);
3668#endif
3669
3670        return trans;
3671
3672out_free_ict:
3673        iwl_pcie_free_ict(trans);
3674out_no_pci:
3675        free_percpu(trans_pcie->tso_hdr_page);
3676        iwl_trans_free(trans);
3677        return ERR_PTR(ret);
3678}
3679
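/*
 * Fire an NMI at the firmware and poll (up to IWL_TRANS_NMI_TIMEOUT) for
 * the resulting SW error interrupt, with interrupts disabled so that the
 * regular handler does not consume it first; then report the error via
 * iwl_trans_fw_error().
 */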
3680void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
3681{
3682        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3683        unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
3684        u32 inta_addr, sw_err_bit;
3685
3686        if (trans_pcie->msix_enabled) {
3687                inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
3688                sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
3689        } else {
3690                inta_addr = CSR_INT;
3691                sw_err_bit = CSR_INT_BIT_SW_ERR;
3692        }
3693
3694        iwl_disable_interrupts(trans);
3695        iwl_force_nmi(trans);
3696        while (time_after(timeout, jiffies)) {
3697                u32 inta_hw = iwl_read32(trans, inta_addr);
3698
3699                /* Error detected by uCode */
3700                if (inta_hw & sw_err_bit) {
3701                        /* Clear causes register */
3702                        iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
3703                        break;
3704                }
3705
3706                mdelay(1);
3707        }
3708        iwl_enable_interrupts(trans);
3709        iwl_trans_fw_error(trans);
3710}
3711