linux/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
<<
>>
Prefs
   1/******************************************************************************
   2 *
   3 * This file is provided under a dual BSD/GPLv2 license.  When using or
   4 * redistributing this file, you may do so under either license.
   5 *
   6 * GPL LICENSE SUMMARY
   7 *
   8 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
   9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  11 * Copyright(c) 2018 - 2019 Intel Corporation
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of version 2 of the GNU General Public License as
  15 * published by the Free Software Foundation.
  16 *
  17 * This program is distributed in the hope that it will be useful, but
  18 * WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  20 * General Public License for more details.
  21 *
  22 * The full GNU General Public License is included in this distribution
  23 * in the file called COPYING.
  24 *
  25 * Contact Information:
  26 *  Intel Linux Wireless <linuxwifi@intel.com>
  27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  28 *
  29 * BSD LICENSE
  30 *
  31 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
  32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  34 * Copyright(c) 2018 - 2019 Intel Corporation
  35 * All rights reserved.
  36 *
  37 * Redistribution and use in source and binary forms, with or without
  38 * modification, are permitted provided that the following conditions
  39 * are met:
  40 *
  41 *  * Redistributions of source code must retain the above copyright
  42 *    notice, this list of conditions and the following disclaimer.
  43 *  * Redistributions in binary form must reproduce the above copyright
  44 *    notice, this list of conditions and the following disclaimer in
  45 *    the documentation and/or other materials provided with the
  46 *    distribution.
  47 *  * Neither the name Intel Corporation nor the names of its
  48 *    contributors may be used to endorse or promote products derived
  49 *    from this software without specific prior written permission.
  50 *
  51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  62 *
  63 *****************************************************************************/
  64#include <linux/pci.h>
  65#include <linux/pci-aspm.h>
  66#include <linux/interrupt.h>
  67#include <linux/debugfs.h>
  68#include <linux/sched.h>
  69#include <linux/bitops.h>
  70#include <linux/gfp.h>
  71#include <linux/vmalloc.h>
  72#include <linux/pm_runtime.h>
  73#include <linux/module.h>
  74#include <linux/wait.h>
  75
  76#include "iwl-drv.h"
  77#include "iwl-trans.h"
  78#include "iwl-csr.h"
  79#include "iwl-prph.h"
  80#include "iwl-scd.h"
  81#include "iwl-agn-hw.h"
  82#include "fw/error-dump.h"
  83#include "fw/dbg.h"
  84#include "internal.h"
  85#include "iwl-fh.h"
  86
  87/* extended range in FW SRAM */
  88#define IWL_FW_MEM_EXTENDED_START       0x40000
  89#define IWL_FW_MEM_EXTENDED_END         0x57FFF
  90
/*
 * Dump PCI config space, memory-mapped registers and AER capability
 * structures of the device, its parent bridge and the root port to the
 * kernel log, as a post-mortem aid after a failed transaction.
 * Runs at most once per device lifetime (pcie_dbg_dumped_once latch).
 * Uses GFP_ATOMIC since it may be called from non-sleepable context.
 */
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer - one allocation serves all dump stages;
	 * the print_hex_dump() prefix string lives in its last PREFIX_LEN
	 * bytes */
	alloc_size = PCI_ERR_ROOT_ERR_SRC +  4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Dump the device's AER extended capability, if present */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	/* NB: pdev is repointed at the parent bridge from here on */
	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	/* Dump whatever was read before the failing config access */
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}
 184
 185static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
 186{
 187        /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
 188        iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
 189                    BIT(trans->cfg->csr->flag_sw_reset));
 190        usleep_range(5000, 6000);
 191}
 192
 193static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
 194{
 195        int i;
 196
 197        for (i = 0; i < trans->dbg.num_blocks; i++) {
 198                dma_free_coherent(trans->dev, trans->dbg.fw_mon[i].size,
 199                                  trans->dbg.fw_mon[i].block,
 200                                  trans->dbg.fw_mon[i].physical);
 201                trans->dbg.fw_mon[i].block = NULL;
 202                trans->dbg.fw_mon[i].physical = 0;
 203                trans->dbg.fw_mon[i].size = 0;
 204                trans->dbg.num_blocks--;
 205        }
 206}
 207
 208static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
 209                                            u8 max_power, u8 min_power)
 210{
 211        void *cpu_addr = NULL;
 212        dma_addr_t phys = 0;
 213        u32 size = 0;
 214        u8 power;
 215
 216        for (power = max_power; power >= min_power; power--) {
 217                size = BIT(power);
 218                cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
 219                                              GFP_KERNEL | __GFP_NOWARN |
 220                                              __GFP_ZERO | __GFP_COMP);
 221                if (!cpu_addr)
 222                        continue;
 223
 224                IWL_INFO(trans,
 225                         "Allocated 0x%08x bytes for firmware monitor.\n",
 226                         size);
 227                break;
 228        }
 229
 230        if (WARN_ON_ONCE(!cpu_addr))
 231                return;
 232
 233        if (power != max_power)
 234                IWL_ERR(trans,
 235                        "Sorry - debug buffer is only %luK while you requested %luK\n",
 236                        (unsigned long)BIT(power - 10),
 237                        (unsigned long)BIT(max_power - 10));
 238
 239        trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr;
 240        trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys;
 241        trans->dbg.fw_mon[trans->dbg.num_blocks].size = size;
 242        trans->dbg.num_blocks++;
 243}
 244
 245void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
 246{
 247        if (!max_power) {
 248                /* default max_power is maximum */
 249                max_power = 26;
 250        } else {
 251                max_power += 11;
 252        }
 253
 254        if (WARN(max_power > 26,
 255                 "External buffer size for monitor is too big %d, check the FW TLV\n",
 256                 max_power))
 257                return;
 258
 259        /*
 260         * This function allocats the default fw monitor.
 261         * The optional additional ones will be allocated in runtime
 262         */
 263        if (trans->dbg.num_blocks)
 264                return;
 265
 266        iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
 267}
 268
 269static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
 270{
 271        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
 272                    ((reg & 0x0000ffff) | (2 << 28)));
 273        return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
 274}
 275
 276static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
 277{
 278        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
 279        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
 280                    ((reg & 0x0000ffff) | (3 << 28)));
 281}
 282
 283static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 284{
 285        if (trans->cfg->apmg_not_supported)
 286                return;
 287
 288        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
 289                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
 290                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
 291                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
 292        else
 293                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
 294                                       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
 295                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
 296}
 297
 298/* PCI registers */
 299#define PCI_CFG_RETRY_TIMEOUT   0x041
 300
 301void iwl_pcie_apm_config(struct iwl_trans *trans)
 302{
 303        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 304        u16 lctl;
 305        u16 cap;
 306
 307        /*
 308         * HW bug W/A for instability in PCIe bus L0S->L1 transition.
 309         * Check if BIOS (or OS) enabled L1-ASPM on this device.
 310         * If so (likely), disable L0S, so device moves directly L0->L1;
 311         *    costs negligible amount of power savings.
 312         * If not (unlikely), enable L0S, so there is at least some
 313         *    power savings, even without L1.
 314         */
 315        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
 316        if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
 317                iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
 318        else
 319                iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
 320        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
 321
 322        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
 323        trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
 324        IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
 325                        (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
 326                        trans->ltr_enabled ? "En" : "Dis");
 327}
 328
 329/*
 330 * Start up NIC's basic functionality after it has been reset
 331 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 332 * NOTE:  This does not load uCode nor start the embedded processor
 333 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	/* Returns 0 on success, or the error from iwl_finish_nic_init() */
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	/* Device is now usable by the rest of the driver */
	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}
 424
 425/*
 426 * Enable LP XTAL to avoid HW bug where device may consume much power if
 427 * FW is not loaded after device reset. LP XTAL is disabled by default
 428 * after device HW reset. Do it only if XTAL is fed by internal source.
 429 * Configure device's "persistence" mode to avoid resetting XTAL again when
 430 * SHRD_HW_RST occurs in S3.
 431 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	iwl_trans_pcie_sw_reset(trans);

	ret = iwl_finish_nic_init(trans);
	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 * The pre-existing register value is kept in apmg_xtal_cfg_reg so
	 * it can be restored (minus the ON request) at the end.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	iwl_trans_pcie_sw_reset(trans);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      BIT(trans->cfg->csr->flag_init_done));

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL: restore the saved config without the ON
	 * request bit */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
 511
 512void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 513{
 514        int ret;
 515
 516        /* stop device's busmaster DMA activity */
 517        iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
 518                    BIT(trans->cfg->csr->flag_stop_master));
 519
 520        ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset,
 521                           BIT(trans->cfg->csr->flag_master_dis),
 522                           BIT(trans->cfg->csr->flag_master_dis), 100);
 523        if (ret < 0)
 524                IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
 525
 526        IWL_DEBUG_INFO(trans, "stop master\n");
 527}
 528
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		/* make sure APM is up before talking to APMG / ME below */
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			/* NOTE(review): presumably lets the ME latch the
			 * PME request before the disable bit is cleared —
			 * confirm against HW documentation */
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* the LP XTAL workaround path performs its own sw reset and
	 * init-done handling, so return early */
	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      BIT(trans->cfg->csr->flag_init_done));
}
 573
 574static int iwl_pcie_nic_init(struct iwl_trans *trans)
 575{
 576        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 577        int ret;
 578
 579        /* nic_init */
 580        spin_lock(&trans_pcie->irq_lock);
 581        ret = iwl_pcie_apm_init(trans);
 582        spin_unlock(&trans_pcie->irq_lock);
 583
 584        if (ret)
 585                return ret;
 586
 587        iwl_pcie_set_pwr(trans, false);
 588
 589        iwl_op_mode_nic_config(trans->op_mode);
 590
 591        /* Allocate the RX queue, or reset if it is already allocated */
 592        iwl_pcie_rx_init(trans);
 593
 594        /* Allocate or reset and init all Tx and Command queues */
 595        if (iwl_pcie_tx_init(trans))
 596                return -ENOMEM;
 597
 598        if (trans->cfg->base_params->shadow_reg_enable) {
 599                /* enable shadow regs in HW */
 600                iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
 601                IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
 602        }
 603
 604        return 0;
 605}
 606
 607#define HW_READY_TIMEOUT (50)
 608
 609/* Note: returns poll_bit return value, which is >= 0 if success */
 610static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
 611{
 612        int ret;
 613
 614        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 615                    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
 616
 617        /* See if we got it */
 618        ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
 619                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
 620                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
 621                           HW_READY_TIMEOUT);
 622
 623        if (ret >= 0)
 624                iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
 625
 626        IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
 627        return ret;
 628}
 629
 630/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;	/* accumulated polling time budget, in usec units */
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		/* NOTE(review): t is never reset, so the inner loop polls
		 * up to its full budget only on the first outer iteration
		 * and exactly once on each later one — confirm this is the
		 * intended back-off behavior */
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}
 668
 669/*
 670 * ucode
 671 */
/*
 * Program the FH service DMA channel to transfer one firmware chunk from
 * host memory (phy_addr) into device SRAM (dst_addr), then start it.
 * Completion is signalled asynchronously; see the caller's wait.
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	/* pause the channel before reprogramming it */
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* destination address in device SRAM */
	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	/* source DMA address: low bits ... */
	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* ... and high bits, combined with the byte count */
	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* single valid TB in the buffer status register */
	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* kick off the transfer; interrupt host at end of TFD */
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}
 699
 700static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
 701                                        u32 dst_addr, dma_addr_t phy_addr,
 702                                        u32 byte_cnt)
 703{
 704        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 705        unsigned long flags;
 706        int ret;
 707
 708        trans_pcie->ucode_write_complete = false;
 709
 710        if (!iwl_trans_grab_nic_access(trans, &flags))
 711                return -EIO;
 712
 713        iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
 714                                        byte_cnt);
 715        iwl_trans_release_nic_access(trans, &flags);
 716
 717        ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
 718                                 trans_pcie->ucode_write_complete, 5 * HZ);
 719        if (!ret) {
 720                IWL_ERR(trans, "Failed to load firmware chunk!\n");
 721                iwl_trans_pcie_dump_regs(trans);
 722                return -ETIMEDOUT;
 723        }
 724
 725        return 0;
 726}
 727
/*
 * Load one uCode section into device SRAM through a DMA bounce buffer,
 * in chunks of at most FH_MEM_TB_MAX_LENGTH (or PAGE_SIZE on fallback).
 * Returns 0 on success or a negative error code.
 */
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	/* prefer one large bounce buffer; fall back to PAGE_SIZE chunks
	 * when that much contiguous DMA memory is not available */
	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	/* copy the section chunk by chunk into the bounce buffer and let
	 * the FH DMA each chunk into device SRAM */
	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		/* destinations in the extended SRAM window need the
		 * LMPM_CHICK extended-address bit set around the transfer */
		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		/* always undo the extended-address bit, even on error */
		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
 784
/*
 * Load the firmware sections belonging to one CPU on 8000-family devices.
 *
 * @cpu: 1 or 2; CPU1's status bits live in the low half-word of
 *	FH_UCODE_LOAD_STATUS, CPU2's in the high half-word (shift_param).
 * @first_ucode_section: in/out cursor into image->sec[]; on return it points
 *	at the separator section so the CPU2 pass can resume after it.
 *
 * After each successfully loaded section the ucode is notified by OR-ing a
 * growing bit pattern (sec_num) into the load-status register.  Once all
 * sections are loaded, the relevant half (or whole) of the status register
 * is set to all-ones to signal completion.
 *
 * Returns 0 on success or the error from iwl_pcie_load_section().
 */
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		/* skip the separator section the CPU1 pass stopped at */
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		/* next section sets one more bit: 0x1, 0x3, 0x7, ... */
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	/* signal "all sections done" for this CPU; TFH devices use the
	 * UREG register, older ones the FH register */
	if (trans->cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}
 854
 855static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 856                                      const struct fw_img *image,
 857                                      int cpu,
 858                                      int *first_ucode_section)
 859{
 860        int i, ret = 0;
 861        u32 last_read_idx = 0;
 862
 863        if (cpu == 1)
 864                *first_ucode_section = 0;
 865        else
 866                (*first_ucode_section)++;
 867
 868        for (i = *first_ucode_section; i < image->num_sec; i++) {
 869                last_read_idx = i;
 870
 871                /*
 872                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
 873                 * CPU1 to CPU2.
 874                 * PAGING_SEPARATOR_SECTION delimiter - separate between
 875                 * CPU2 non paged to CPU2 paging sec.
 876                 */
 877                if (!image->sec[i].data ||
 878                    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
 879                    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
 880                        IWL_DEBUG_FW(trans,
 881                                     "Break since Data not valid or Empty section, sec = %d\n",
 882                                     i);
 883                        break;
 884                }
 885
 886                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
 887                if (ret)
 888                        return ret;
 889        }
 890
 891        *first_ucode_section = last_read_idx;
 892
 893        return 0;
 894}
 895
/*
 * Program the firmware debug-data destination.
 *
 * For the new ini-style debug configuration, simply point the version-2
 * monitor base/end registers at the first pre-allocated DRAM fragment and
 * return.  Otherwise, replay the register-operation list from the legacy
 * destination TLV and, for EXTERNAL_MODE, program the monitor buffer
 * base/end registers from the allocated DRAM buffer.
 */
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	int i;

	/* ini-style config: only DRAM buffer[0] is applied here */
	if (trans->dbg.ini_valid) {
		if (!trans->dbg.num_blocks)
			return;

		IWL_DEBUG_FW(trans,
			     "WRT: applying DRAM buffer[0] destination\n");
		iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
				    trans->dbg.fw_mon[0].physical >>
				    MON_BUFF_SHIFT_VER2);
		/* end address leaves 256 bytes of headroom at the top */
		iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
				    (trans->dbg.fw_mon[0].physical +
				     trans->dbg.fw_mon[0].size - 256) >>
				    MON_BUFF_SHIFT_VER2);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	/* replay the TLV's register-operation list in order */
	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			/* a set blocking bit aborts the rest of the list,
			 * but the monitor buffer is still programmed */
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans->dbg.fw_mon[0].physical >>
			       dest->base_shift);
		/* 8000+ devices need 256 bytes of headroom at the top */
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans->dbg.fw_mon[0].physical +
					trans->dbg.fw_mon[0].size - 256) >>
						dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans->dbg.fw_mon[0].physical +
					trans->dbg.fw_mon[0].size) >>
						dest->end_shift);
	}
}
 980
/*
 * Load a complete firmware image (pre-8000 flow): CPU1 sections, then -
 * for dual-CPU images - the CPU2 header address and CPU2 sections, then
 * set up the firmware monitor (if requested) and finally release the CPU
 * from reset to start execution.
 *
 * Returns 0 on success or the first section-load error.
 */
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans->dbg.fw_mon[0].size) {
			/* registers hold addresses shifted right by 4 */
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans->dbg.fw_mon[0].physical >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans->dbg.fw_mon[0].physical +
					trans->dbg.fw_mon[0].size) >> 4);
		}
	} else if (iwl_pcie_dbg_on(trans)) {
		iwl_pcie_apply_destination(trans);
	}

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
1031
/*
 * Load a complete firmware image on 8000-family devices: debug destination
 * first, then release the CPU from reset so it can accept the secured
 * image, then the CPU1 and CPU2 section passes (with per-section load
 * status handshakes).
 *
 * Returns 0 on success or the first section-load error.
 */
static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}
1068
1069bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
1070{
1071        struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
1072        bool hw_rfkill = iwl_is_rfkill_set(trans);
1073        bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1074        bool report;
1075
1076        if (hw_rfkill) {
1077                set_bit(STATUS_RFKILL_HW, &trans->status);
1078                set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1079        } else {
1080                clear_bit(STATUS_RFKILL_HW, &trans->status);
1081                if (trans_pcie->opmode_down)
1082                        clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1083        }
1084
1085        report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1086
1087        if (prev != report)
1088                iwl_trans_pcie_rf_kill(trans, report);
1089
1090        return hw_rfkill;
1091}
1092
/*
 * struct iwl_causes_list - maps one MSI-X interrupt cause to its IVAR slot
 * @cause_num: the cause's bit in @mask_reg
 * @mask_reg: CSR mask register (FH or HW) used to unmask the cause
 * @addr: byte offset of the cause's entry in the IVAR table
 */
struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};
1098
/* Non-RX interrupt causes for all device families except 22560; each entry
 * is written into the IVAR table and unmasked in the listed mask register.
 */
static struct iwl_causes_list causes_list[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_IML,            CSR_MSIX_HW_INT_MASK_AD, 0x12},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
1116
/* Non-RX interrupt causes for 22560-family devices (different WAKEUP/IPC
 * and SW_ERR cause layout than causes_list above).
 */
static struct iwl_causes_list causes_list_v2[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_IPC,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR_V2,	CSR_MSIX_HW_INT_MASK_AD, 0x15},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
1133
1134static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
1135{
1136        struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
1137        int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
1138        int i, arr_size =
1139                (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
1140                ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
1141
1142        /*
1143         * Access all non RX causes and map them to the default irq.
1144         * In case we are missing at least one interrupt vector,
1145         * the first interrupt vector will serve non-RX and FBQ causes.
1146         */
1147        for (i = 0; i < arr_size; i++) {
1148                struct iwl_causes_list *causes =
1149                        (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
1150                        causes_list : causes_list_v2;
1151
1152                iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
1153                iwl_clear_bit(trans, causes[i].mask_reg,
1154                              causes[i].cause_num);
1155        }
1156}
1157
/*
 * Map the RX-queue interrupt causes to their MSI-X vectors and unmask
 * them in the FH mask register.
 */
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	/* if the first vector also carries RSS, queue vectors start at 1 */
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	/* unmask exactly the queue causes collected above */
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	/* no auto-clear if this vector also serves non-RX causes */
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
1187
/*
 * (Re)configure the hardware interrupt delivery mode: select MSI or MSI-X
 * via UREG_CHICK and, for MSI-X, rebuild the IVAR table (which is erased
 * by every device reset).
 */
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		/* MSI mode: only multi-queue devices need the explicit
		 * MSI enable, and only while the device is accessible */
		if (trans->cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}
1218
1219static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
1220{
1221        struct iwl_trans *trans = trans_pcie->trans;
1222
1223        iwl_pcie_conf_msix_hw(trans_pcie);
1224
1225        if (!trans_pcie->msix_enabled)
1226                return;
1227
1228        trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
1229        trans_pcie->fh_mask = trans_pcie->fh_init_mask;
1230        trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
1231        trans_pcie->hw_mask = trans_pcie->hw_init_mask;
1232}
1233
/*
 * Stop the device: quiesce interrupts and DMA, power down the APM, reset
 * the device, and leave it in a state where only the RF-kill interrupt is
 * live.  Must be called with trans_pcie->mutex held; idempotent via the
 * is_down flag.
 *
 * The exact ordering below (dbgc stop -> irq off -> ICT off -> TX/RX stop
 * -> APM stop -> SW reset -> MSI-X reconfig -> irq re-mask) is required by
 * the hardware; do not reorder.
 */
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* Stop dbgc before stopping device */
	iwl_fw_dbg_stop_recording(trans, NULL);

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      BIT(trans->cfg->csr->flag_mac_access_req));

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain verions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}
1316
1317void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
1318{
1319        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1320
1321        if (trans_pcie->msix_enabled) {
1322                int i;
1323
1324                for (i = 0; i < trans_pcie->alloc_vecs; i++)
1325                        synchronize_irq(trans_pcie->msix_entries[i].vector);
1326        } else {
1327                synchronize_irq(trans_pcie->pci_dev->irq);
1328        }
1329}
1330
1331static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1332                                   const struct fw_img *fw, bool run_in_rfkill)
1333{
1334        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1335        bool hw_rfkill;
1336        int ret;
1337
1338        /* This may fail if AMT took ownership of the device */
1339        if (iwl_pcie_prepare_card_hw(trans)) {
1340                IWL_WARN(trans, "Exit HW not ready\n");
1341                ret = -EIO;
1342                goto out;
1343        }
1344
1345        iwl_enable_rfkill_int(trans);
1346
1347        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1348
1349        /*
1350         * We enabled the RF-Kill interrupt and the handler may very
1351         * well be running. Disable the interrupts to make sure no other
1352         * interrupt can be fired.
1353         */
1354        iwl_disable_interrupts(trans);
1355
1356        /* Make sure it finished running */
1357        iwl_pcie_synchronize_irqs(trans);
1358
1359        mutex_lock(&trans_pcie->mutex);
1360
1361        /* If platform's RF_KILL switch is NOT set to KILL */
1362        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
1363        if (hw_rfkill && !run_in_rfkill) {
1364                ret = -ERFKILL;
1365                goto out;
1366        }
1367
1368        /* Someone called stop_device, don't try to start_fw */
1369        if (trans_pcie->is_down) {
1370                IWL_WARN(trans,
1371                         "Can't start_fw since the HW hasn't been started\n");
1372                ret = -EIO;
1373                goto out;
1374        }
1375
1376        /* make sure rfkill handshake bits are cleared */
1377        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1378        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1379                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1380
1381        /* clear (again), then enable host interrupts */
1382        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1383
1384        ret = iwl_pcie_nic_init(trans);
1385        if (ret) {
1386                IWL_ERR(trans, "Unable to init nic\n");
1387                goto out;
1388        }
1389
1390        /*
1391         * Now, we load the firmware and don't want to be interrupted, even
1392         * by the RF-Kill interrupt (hence mask all the interrupt besides the
1393         * FH_TX interrupt which is needed to load the firmware). If the
1394         * RF-Kill switch is toggled, we will find out after having loaded
1395         * the firmware and return the proper value to the caller.
1396         */
1397        iwl_enable_fw_load_int(trans);
1398
1399        /* really make sure rfkill handshake bits are cleared */
1400        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1401        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1402
1403        /* Load the given image to the HW */
1404        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
1405                ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1406        else
1407                ret = iwl_pcie_load_given_ucode(trans, fw);
1408
1409        /* re-check RF-Kill state since we may have missed the interrupt */
1410        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
1411        if (hw_rfkill && !run_in_rfkill)
1412                ret = -ERFKILL;
1413
1414out:
1415        mutex_unlock(&trans_pcie->mutex);
1416        return ret;
1417}
1418
/*
 * Firmware "alive" callback: reset the ICT interrupt table and start the
 * TX scheduler at the address reported by the firmware.
 */
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
1424
1425void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
1426                                       bool was_in_rfkill)
1427{
1428        bool hw_rfkill;
1429
1430        /*
1431         * Check again since the RF kill state may have changed while
1432         * all the interrupts were disabled, in this case we couldn't
1433         * receive the RF kill interrupt and update the state in the
1434         * op_mode.
1435         * Don't call the op_mode if the rkfill state hasn't changed.
1436         * This allows the op_mode to call stop_device from the rfkill
1437         * notification without endless recursion. Under very rare
1438         * circumstances, we might have a small recursion if the rfkill
1439         * state changed exactly now while we were called from stop_device.
1440         * This is very unlikely but can happen and is supported.
1441         */
1442        hw_rfkill = iwl_is_rfkill_set(trans);
1443        if (hw_rfkill) {
1444                set_bit(STATUS_RFKILL_HW, &trans->status);
1445                set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1446        } else {
1447                clear_bit(STATUS_RFKILL_HW, &trans->status);
1448                clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1449        }
1450        if (hw_rfkill != was_in_rfkill)
1451                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1452}
1453
/*
 * Op-mode entry point for stopping the device: marks the opmode as down
 * (which allows STATUS_RFKILL_OPMODE to be cleared later), performs the
 * actual stop under the transport mutex, and reports any RF-kill state
 * change that happened while interrupts were off.
 */
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	/* snapshot the reported state before the stop masks interrupts */
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans, low_power);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}
1466
/*
 * Report an RF-kill state change to the op_mode and, if the op_mode asks
 * for it (non-zero return), stop the device.  Must be called with
 * trans_pcie->mutex held.
 *
 * @state: true = radio disabled (killed), false = radio enabled.
 */
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	/* __maybe_unused: only referenced by the lockdep assertion below */
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		/* op_mode wants the device stopped; pick the gen-specific
		 * internal stop (mutex already held, so not the public one) */
		if (trans->cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans, true);
		else
			_iwl_trans_pcie_stop_device(trans, true);
	}
}
1483
/*
 * Prepare the device for D3 (suspend): optionally enable persistence mode
 * (so the device keeps its state across the host suspend), quiesce
 * interrupts, drop the MAC-access/init-done handshake bits and switch to
 * low power.
 *
 * @test: D3 test mode - host stays awake, hardware untouched beyond
 *	masking interrupts.
 * @reset: device will be reset across suspend; TX queues must be reset so
 *	the D3 image doesn't resume with stale queue registers.
 */
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				      bool reset)
{
	if (!reset) {
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
	}

	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	/* wait for in-flight interrupt handlers before touching the HW */
	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      BIT(trans->cfg->csr->flag_mac_access_req));
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      BIT(trans->cfg->csr->flag_init_done));

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}
1522
/*
 * Resume the device from D3.
 *
 * Sets *status to IWL_D3_STATUS_RESET if the CSR_RESET register shows
 * the device was reset while suspended, IWL_D3_STATUS_ALIVE otherwise.
 * Returns 0 on success or a negative error (NIC init / RX init
 * failure).  @test and @reset mirror iwl_trans_pcie_d3_suspend().
 */
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test,  bool reset)
{
	struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	/* wake the device: request MAC access */
	iwl_set_bit(trans, CSR_GP_CNTRL,
		    BIT(trans->cfg->csr->flag_mac_access_req));

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	/*
	 * Reconfigure IVAR table in case of MSIX or reset ict table in
	 * MSI mode since HW reset erased it.
	 * Also enables interrupts - none will happen as
	 * the device doesn't know we're waking it up, only when
	 * the opmode actually tells it after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		/* persistence mode was used: just drop MAC access again */
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      BIT(trans->cfg->csr->flag_mac_access_req));
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	/* did the firmware/device go through a reset while in D3? */
	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}
1583
/*
 * Choose and enable the interrupt mode for the device.
 *
 * Devices with multi-queue RX try MSI-X first, requesting up to
 * num_online_cpus() + 2 vectors (capped at IWL_MAX_RX_HW_QUEUES).
 * If fewer vectors are granted, causes are shared as described below.
 * On MSI-X failure (or no MQ RX support) fall back to plain MSI; if
 * even MSI fails, re-enable INTx as an RF-kill interrupt workaround.
 */
static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
					struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;

	if (!trans->cfg->mq_rx_supported)
		goto enable_msi;

	max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	/* if all vectors were granted, the last one is the default IRQ */
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}
	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}
1648
/*
 * Spread the RX-queue MSI-X vectors across online CPUs by installing
 * an affinity hint per vector.  The start index depends on whether
 * the first RSS queue shares its vector with non-RX causes
 * (IWL_SHARED_IRQ_FIRST_RSS): if so iteration starts at vector 0,
 * otherwise vector 0 is skipped.
 */
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				i);
	}
}
1672
/*
 * Request a (devm-managed) threaded IRQ for every allocated MSI-X
 * vector.  The default vector gets the general handler
 * (iwl_pcie_irq_msix_handler), all others the RX handler
 * (iwl_pcie_irq_rx_msix_handler).  Affinity hints are set at the end.
 *
 * Returns 0 on success or a negative errno (-ENOMEM if a queue name
 * can't be allocated, or the request_irq error).
 */
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}
1707
/*
 * Clear the persistence bit in the HPM_DEBUG register on device
 * families that need it (9000/22000); a no-op (returns 0) for others.
 * Returns -EPERM if the register is write-protected (PREG_WFPM_ACCESS
 * set in the family's WPROT register), 0 otherwise.
 */
static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	/* 0xa5a5a5a0 presumably marks an invalid/failed read - skip then */
	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}
1738
/*
 * Bring the hardware up: prepare the card, clear the persistence bit,
 * SW-reset the device, init the APM and MSI-X, then enable the RF-kill
 * interrupt and check the current RF-kill state.  Caller must hold the
 * transport mutex.  Returns 0 on success or a negative error.
 */
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	iwl_trans_pcie_sw_reset(trans);

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	/* Make sure we sync here, because we'll need full access later */
	if (low_power)
		pm_runtime_resume(trans->dev);

	return 0;
}
1781
1782static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
1783{
1784        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1785        int ret;
1786
1787        mutex_lock(&trans_pcie->mutex);
1788        ret = _iwl_trans_pcie_start_hw(trans, low_power);
1789        mutex_unlock(&trans_pcie->mutex);
1790
1791        return ret;
1792}
1793
/*
 * Called when the op mode detaches from the transport: disable all
 * interrupts (without re-enabling the RF-kill interrupt), stop the
 * APM and tear down the ICT table.
 */
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	/* disable again, in case the APM stop flow re-enabled anything */
	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	/* wait for any in-flight interrupt handlers to finish */
	iwl_pcie_synchronize_irqs(trans);
}
1813
1814static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1815{
1816        writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1817}
1818
1819static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1820{
1821        writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1822}
1823
1824static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1825{
1826        return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1827}
1828
1829static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
1830{
1831        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1832                return 0x00FFFFFF;
1833        else
1834                return 0x000FFFFF;
1835}
1836
1837static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1838{
1839        u32 mask = iwl_trans_pcie_prph_msk(trans);
1840
1841        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1842                               ((reg & mask) | (3 << 24)));
1843        return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1844}
1845
1846static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1847                                      u32 val)
1848{
1849        u32 mask = iwl_trans_pcie_prph_msk(trans);
1850
1851        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1852                               ((addr & mask) | (3 << 24)));
1853        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1854}
1855
/*
 * Apply the op-mode supplied transport configuration: command queue
 * setup, no-reclaim command list, RX buffer size, scheduler/byte-count
 * options, skb callback offsets and command groups.  Also creates the
 * dummy netdev used for NAPI, once.
 */
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	/* clamp an oversized no-reclaim list to empty (and warn) */
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);

	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;

	/* offsets into the skb control buffer, provided by the op mode */
	trans_pcie->page_offs = trans_cfg->cb_data_offs;
	trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size = trans_cfg->command_groups_size;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
		init_dummy_netdev(&trans_pcie->napi_dev);
}
1894
/*
 * Free everything the PCIe transport holds: TX and RX rings, the RB
 * allocator workqueue, MSI-X affinity hints (or the ICT table in MSI
 * mode), the firmware monitor, the per-CPU TSO header pages, and
 * finally the transport structure itself.
 */
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* make sure no interrupt handler still runs before freeing */
	iwl_pcie_synchronize_irqs(trans);

	if (trans->cfg->gen2)
		iwl_pcie_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->rba.alloc_wq) {
		destroy_workqueue(trans_pcie->rba.alloc_wq);
		trans_pcie->rba.alloc_wq = NULL;
	}

	if (trans_pcie->msix_enabled) {
		/* remove the affinity hints installed at init time */
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	iwl_pcie_free_fw_monitor(trans);

	/* release the per-CPU pages used for TSO header construction */
	for_each_possible_cpu(i) {
		struct iwl_tso_hdr_page *p =
			per_cpu_ptr(trans_pcie->tso_hdr_page, i);

		if (p->page)
			__free_page(p->page);
	}

	free_percpu(trans_pcie->tso_hdr_page);
	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}
1939
/* Track the requested PMI state in the transport status bits */
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
1947
/* Deferred device-removal context: the PCI device and the work item */
struct iwl_trans_pcie_removal {
	struct pci_dev *pdev;	/* device to remove; holds a reference */
	struct work_struct work;
};
1952
/*
 * Worker scheduled by iwl_trans_pcie_grab_nic_access() when the device
 * appears gone from the bus: notify userspace via a uevent, then
 * remove the PCI device.  Drops the pci_dev reference taken when the
 * work was scheduled and the module reference that kept this code
 * loaded.
 */
static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
{
	struct iwl_trans_pcie_removal *removal =
		container_of(wk, struct iwl_trans_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	static char *prop[] = {"EVENT=INACCESSIBLE", NULL};

	dev_err(&pdev->dev, "Device gone - attempting removal\n");
	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
	pci_lock_rescan_remove();
	pci_dev_put(pdev);
	pci_stop_and_remove_bus_device(pdev);
	pci_unlock_rescan_remove();

	kfree(removal);
	module_put(THIS_MODULE);
}
1970
/*
 * Wake the NIC and keep it awake for a series of register accesses.
 *
 * On success returns true with reg_lock held (the __release() below
 * only fools sparse; the lock is really held until
 * iwl_trans_pcie_release_nic_access()).  On timeout waiting for the
 * MAC clock, returns false with the lock released; if the CSR reads
 * back all-ones and the remove_when_gone module param is set, the
 * device is assumed gone and scheduled for removal.
 */
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	/* a host command already keeps the NIC awake - nothing to do */
	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 BIT(trans->cfg->csr->flag_mac_access_req));
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * HW with volatile SRAM must save/restore contents to/from
	 * host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP
	 * check is a good idea before accessing the SRAM of HW with
	 * volatile SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   BIT(trans->cfg->csr->flag_val_mac_access_en),
			   (BIT(trans->cfg->csr->flag_mac_clock_ready) |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);

		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  cntrl);

		iwl_trans_pcie_dump_regs(trans);

		/* all-ones read: the device fell off the bus */
		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
			struct iwl_trans_pcie_removal *removal;

			if (test_bit(STATUS_TRANS_DEAD, &trans->status))
				goto err;

			IWL_ERR(trans, "Device gone - scheduling removal!\n");

			/*
			 * get a module reference to avoid doing this
			 * while unloading anyway and to avoid
			 * scheduling a work with code that's being
			 * removed.
			 */
			if (!try_module_get(THIS_MODULE)) {
				IWL_ERR(trans,
					"Module is being unloaded - abort\n");
				goto err;
			}

			removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
			if (!removal) {
				module_put(THIS_MODULE);
				goto err;
			}
			/*
			 * we don't need to clear this flag, because
			 * the trans will be freed and reallocated.
			*/
			set_bit(STATUS_TRANS_DEAD, &trans->status);

			removal->pdev = to_pci_dev(trans->dev);
			INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
			pci_dev_get(removal->pdev);
			schedule_work(&removal->work);
		} else {
			iwl_write32(trans, CSR_RESET,
				    CSR_RESET_REG_FLAG_FORCE_NMI);
		}

err:
		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
		return false;
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}
2074
/*
 * Counterpart of iwl_trans_pcie_grab_nic_access(): drop the MAC access
 * request so the NIC may sleep again (unless a host command holds it
 * awake) and release reg_lock.
 */
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquiring the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   BIT(trans->cfg->csr->flag_mac_access_req));
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
2102
2103static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2104                                   void *buf, int dwords)
2105{
2106        unsigned long flags;
2107        int offs, ret = 0;
2108        u32 *vals = buf;
2109
2110        if (iwl_trans_grab_nic_access(trans, &flags)) {
2111                iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
2112                for (offs = 0; offs < dwords; offs++)
2113                        vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
2114                iwl_trans_release_nic_access(trans, &flags);
2115        } else {
2116                ret = -EBUSY;
2117        }
2118        return ret;
2119}
2120
2121static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
2122                                    const void *buf, int dwords)
2123{
2124        unsigned long flags;
2125        int offs, ret = 0;
2126        const u32 *vals = buf;
2127
2128        if (iwl_trans_grab_nic_access(trans, &flags)) {
2129                iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
2130                for (offs = 0; offs < dwords; offs++)
2131                        iwl_write32(trans, HBUS_TARG_MEM_WDAT,
2132                                    vals ? vals[offs] : 0);
2133                iwl_trans_release_nic_access(trans, &flags);
2134        } else {
2135                ret = -EBUSY;
2136        }
2137        return ret;
2138}
2139
/*
 * Freeze or wake the stuck-queue timer of the TX queues in the @txqs
 * bitmap.  When freezing a non-empty queue, the remaining time of its
 * stuck timer is saved and the timer deleted; on wake the timer is
 * re-armed with that saved remainder.  Empty queues only have their
 * frozen flag toggled.
 */
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		/* already in the requested state - nothing to do */
		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		/* empty queue: no stuck timer to save or re-arm */
		if (txq->read_ptr == txq->write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}
2193
/*
 * Block or unblock HW write-pointer updates on all TX queues except
 * the command queue.  Blocking nests via the txq->block counter; when
 * the last unblock happens, the current write pointer is handed to
 * the hardware.
 */
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans_pcie->txq[i];

		if (i == trans_pcie->cmd_queue)
			continue;

		spin_lock_bh(&txq->lock);

		/* unblock: warn (and skip) if the queue wasn't blocked */
		if (!block && !(WARN_ON_ONCE(!txq->block))) {
			txq->block--;
			if (!txq->block) {
				/* last unblock: push write ptr to the HW */
				iwl_write32(trans, HBUS_TARG_WRPTR,
					    txq->write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
		}

		spin_unlock_bh(&txq->lock);
	}
}
2220
2221#define IWL_FLUSH_WAIT_MS       2000
2222
/*
 * Dump scheduler state for a stuck TX queue: FIFO number, active flag
 * and the SW/HW read/write pointers.  On use_tfh devices only the SW
 * pointers are printed (new SCD register dump is still a TODO).
 */
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->cfg->use_tfh) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
			(trans->cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
			(trans->cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
2252
/*
 * Report the DMA addresses of RX queue @queue (free BD ring, used BD
 * ring and RB status write-back area) to the caller.  Returns -EINVAL
 * for an out-of-range queue index or if RX isn't allocated yet.
 */
static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
				       struct iwl_trans_rxq_dma_data *data)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
		return -EINVAL;

	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
	data->fr_bd_wid = 0;

	return 0;
}
2268
/*
 * Wait (up to IWL_FLUSH_WAIT_MS) for TX queue @txq_idx to drain.
 *
 * Returns 0 when the queue is empty, -ENODEV if the device is gone
 * from the bus, -EINVAL for an unused queue, or -ETIMEDOUT if the
 * queue didn't drain in time (or the write pointer moved for a
 * non-overflow TX while we were waiting, which triggers a WARN).
 */
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	unsigned long now = jiffies;
	bool overflow_tx;
	u8 wr_ptr;

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!test_bit(txq_idx, trans_pcie->queue_used))
		return -EINVAL;

	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
	txq = trans_pcie->txq[txq_idx];

	spin_lock_bh(&txq->lock);
	overflow_tx = txq->overflow_tx ||
		      !skb_queue_empty(&txq->overflow_q);
	spin_unlock_bh(&txq->lock);

	wr_ptr = READ_ONCE(txq->write_ptr);

	while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
		overflow_tx) &&
	       !time_after(jiffies,
			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
		u8 write_ptr = READ_ONCE(txq->write_ptr);

		/*
		 * If write pointer moved during the wait, warn only
		 * if the TX came from op mode. In case TX came from
		 * trans layer (overflow TX) don't warn.
		 */
		if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
			      "WR pointer moved while flushing %d -> %d\n",
			      wr_ptr, write_ptr))
			return -ETIMEDOUT;
		wr_ptr = write_ptr;

		usleep_range(1000, 2000);

		/* re-sample the overflow state under the queue lock */
		spin_lock_bh(&txq->lock);
		overflow_tx = txq->overflow_tx ||
			      !skb_queue_empty(&txq->overflow_q);
		spin_unlock_bh(&txq->lock);
	}

	if (txq->read_ptr != txq->write_ptr) {
		IWL_ERR(trans,
			"fail to flush all tx fifo queues Q %d\n", txq_idx);
		iwl_trans_pcie_log_scd_error(trans, txq);
		return -ETIMEDOUT;
	}

	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);

	return 0;
}
2330
2331static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
2332{
2333        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2334        int cnt;
2335        int ret = 0;
2336
2337        /* waiting for all the tx frames complete might take a while */
2338        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
2339
2340                if (cnt == trans_pcie->cmd_queue)
2341                        continue;
2342                if (!test_bit(cnt, trans_pcie->queue_used))
2343                        continue;
2344                if (!(BIT(cnt) & txq_bm))
2345                        continue;
2346
2347                ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
2348                if (ret)
2349                        break;
2350        }
2351
2352        return ret;
2353}
2354
/*
 * Read-modify-write of a CSR register: clear the bits in @mask, then set
 * the bits in @value.  Serialized against other register accesses via
 * reg_lock (IRQ-safe, since registers are also touched from IRQ context).
 */
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
                                         u32 mask, u32 value)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
2365
/*
 * Take a runtime-PM reference on the PCI device so it cannot enter
 * D0i3/runtime suspend while in use.  No-op when d0i3 is disabled via
 * module parameter.  Balanced by iwl_trans_pcie_unref().
 */
static void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (iwlwifi_mod_params.d0i3_disable)
                return;

        pm_runtime_get(&trans_pcie->pci_dev->dev);

#ifdef CONFIG_PM
        /* usage_count only exists when CONFIG_PM is set */
        IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
                      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
#endif /* CONFIG_PM */
}
2380
/*
 * Drop the runtime-PM reference taken by iwl_trans_pcie_ref(), allowing
 * the device to autosuspend after its inactivity delay.  No-op when d0i3
 * is disabled via module parameter.
 */
static void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (iwlwifi_mod_params.d0i3_disable)
                return;

        /* reset the idle timer, then drop the reference with autosuspend */
        pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
        pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);

#ifdef CONFIG_PM
        /* usage_count only exists when CONFIG_PM is set */
        IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
                      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
#endif /* CONFIG_PM */
}
2396
/*
 * Map a CSR register offset to its symbolic name for debug dumps.
 * Returns "UNKNOWN" for offsets not in the table.
 */
static const char *get_csr_string(int cmd)
{
/* expands each case to: case x: return "x" */
#define IWL_CMD(x) case x: return #x
        switch (cmd) {
        IWL_CMD(CSR_HW_IF_CONFIG_REG);
        IWL_CMD(CSR_INT_COALESCING);
        IWL_CMD(CSR_INT);
        IWL_CMD(CSR_INT_MASK);
        IWL_CMD(CSR_FH_INT_STATUS);
        IWL_CMD(CSR_GPIO_IN);
        IWL_CMD(CSR_RESET);
        IWL_CMD(CSR_GP_CNTRL);
        IWL_CMD(CSR_HW_REV);
        IWL_CMD(CSR_EEPROM_REG);
        IWL_CMD(CSR_EEPROM_GP);
        IWL_CMD(CSR_OTP_GP_REG);
        IWL_CMD(CSR_GIO_REG);
        IWL_CMD(CSR_GP_UCODE_REG);
        IWL_CMD(CSR_GP_DRIVER_REG);
        IWL_CMD(CSR_UCODE_DRV_GP1);
        IWL_CMD(CSR_UCODE_DRV_GP2);
        IWL_CMD(CSR_LED_REG);
        IWL_CMD(CSR_DRAM_INT_TBL_REG);
        IWL_CMD(CSR_GIO_CHICKEN_BITS);
        IWL_CMD(CSR_ANA_PLL_CFG);
        IWL_CMD(CSR_HW_REV_WA_REG);
        IWL_CMD(CSR_MONITOR_STATUS_REG);
        IWL_CMD(CSR_DBG_HPET_MEM_REG);
        default:
                return "UNKNOWN";
        }
#undef IWL_CMD
}
2430
2431void iwl_pcie_dump_csr(struct iwl_trans *trans)
2432{
2433        int i;
2434        static const u32 csr_tbl[] = {
2435                CSR_HW_IF_CONFIG_REG,
2436                CSR_INT_COALESCING,
2437                CSR_INT,
2438                CSR_INT_MASK,
2439                CSR_FH_INT_STATUS,
2440                CSR_GPIO_IN,
2441                CSR_RESET,
2442                CSR_GP_CNTRL,
2443                CSR_HW_REV,
2444                CSR_EEPROM_REG,
2445                CSR_EEPROM_GP,
2446                CSR_OTP_GP_REG,
2447                CSR_GIO_REG,
2448                CSR_GP_UCODE_REG,
2449                CSR_GP_DRIVER_REG,
2450                CSR_UCODE_DRV_GP1,
2451                CSR_UCODE_DRV_GP2,
2452                CSR_LED_REG,
2453                CSR_DRAM_INT_TBL_REG,
2454                CSR_GIO_CHICKEN_BITS,
2455                CSR_ANA_PLL_CFG,
2456                CSR_MONITOR_STATUS_REG,
2457                CSR_HW_REV_WA_REG,
2458                CSR_DBG_HPET_MEM_REG
2459        };
2460        IWL_ERR(trans, "CSR values:\n");
2461        IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2462                "CSR_INT_PERIODIC_REG)\n");
2463        for (i = 0; i <  ARRAY_SIZE(csr_tbl); i++) {
2464                IWL_ERR(trans, "  %25s: 0X%08x\n",
2465                        get_csr_string(csr_tbl[i]),
2466                        iwl_read32(trans, csr_tbl[i]));
2467        }
2468}
2469
2470#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
/*
 * Create a debugfs file named #name under @parent, with the trans
 * pointer as private data and iwl_dbgfs_<name>_ops as file operations.
 */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {                       \
        debugfs_create_file(#name, mode, parent, trans,                 \
                            &iwl_dbgfs_##name##_ops);                   \
} while (0)

/* file operation */
/* Declare read-only file_operations backed by iwl_dbgfs_<name>_read() */
#define DEBUGFS_READ_FILE_OPS(name)                                     \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = simple_open,                                            \
        .llseek = generic_file_llseek,                                  \
};

/* Declare write-only file_operations backed by iwl_dbgfs_<name>_write() */
#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .write = iwl_dbgfs_##name##_write,                              \
        .open = simple_open,                                            \
        .llseek = generic_file_llseek,                                  \
};

/* Declare read/write file_operations backed by the matching handlers */
#define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .write = iwl_dbgfs_##name##_write,                              \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = simple_open,                                            \
        .llseek = generic_file_llseek,                                  \
};
2499
/*
 * debugfs read handler: print one status line per TX queue (pointers,
 * used/stopped flags, pending-update/frozen state, HCMD marker for the
 * command queue).  Returns -EAGAIN while TX memory is not yet allocated.
 */
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                                       char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq;
        char *buf;
        int pos = 0;
        int cnt;
        int ret;
        size_t bufsz;

        /* 75 bytes is enough for the formatted line printed per queue */
        bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

        if (!trans_pcie->txq_memory)
                return -EAGAIN;

        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                txq = trans_pcie->txq[cnt];
                pos += scnprintf(buf + pos, bufsz - pos,
                                "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
                                cnt, txq->read_ptr, txq->write_ptr,
                                !!test_bit(cnt, trans_pcie->queue_used),
                                 !!test_bit(cnt, trans_pcie->queue_stopped),
                                 txq->need_update, txq->frozen,
                                 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
        }
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}
2536
2537static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2538                                       char __user *user_buf,
2539                                       size_t count, loff_t *ppos)
2540{
2541        struct iwl_trans *trans = file->private_data;
2542        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2543        char *buf;
2544        int pos = 0, i, ret;
2545        size_t bufsz = sizeof(buf);
2546
2547        bufsz = sizeof(char) * 121 * trans->num_rx_queues;
2548
2549        if (!trans_pcie->rxq)
2550                return -EAGAIN;
2551
2552        buf = kzalloc(bufsz, GFP_KERNEL);
2553        if (!buf)
2554                return -ENOMEM;
2555
2556        for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
2557                struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2558
2559                pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2560                                 i);
2561                pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2562                                 rxq->read);
2563                pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2564                                 rxq->write);
2565                pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2566                                 rxq->write_actual);
2567                pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2568                                 rxq->need_update);
2569                pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2570                                 rxq->free_count);
2571                if (rxq->rb_stts) {
2572                        u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
2573                                                                     rxq));
2574                        pos += scnprintf(buf + pos, bufsz - pos,
2575                                         "\tclosed_rb_num: %u\n",
2576                                         r & 0x0FFF);
2577                } else {
2578                        pos += scnprintf(buf + pos, bufsz - pos,
2579                                         "\tclosed_rb_num: Not Allocated\n");
2580                }
2581        }
2582        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2583        kfree(buf);
2584
2585        return ret;
2586}
2587
/*
 * debugfs read handler: format the accumulated interrupt statistics
 * (isr_stats) into a human-readable report.
 */
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

        int pos = 0;
        char *buf;
        int bufsz = 24 * 64; /* 24 items * 64 char per item */
        ssize_t ret;

        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        pos += scnprintf(buf + pos, bufsz - pos,
                        "Interrupt Statistics Report:\n");

        pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
                isr_stats->hw);
        pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
                isr_stats->sw);
        /* only meaningful if at least one error was counted */
        if (isr_stats->sw || isr_stats->hw) {
                pos += scnprintf(buf + pos, bufsz - pos,
                        "\tLast Restarting Code:  0x%X\n",
                        isr_stats->err_code);
        }
#ifdef CONFIG_IWLWIFI_DEBUG
        /* sch/alive counters are only maintained in debug builds */
        pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
                isr_stats->sch);
        pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
                isr_stats->alive);
#endif
        pos += scnprintf(buf + pos, bufsz - pos,
                "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

        pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
                isr_stats->ctkill);

        pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
                isr_stats->wakeup);

        pos += scnprintf(buf + pos, bufsz - pos,
                "Rx command responses:\t\t %u\n", isr_stats->rx);

        pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
                isr_stats->tx);

        pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
                isr_stats->unhandled);

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}
2645
2646static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2647                                         const char __user *user_buf,
2648                                         size_t count, loff_t *ppos)
2649{
2650        struct iwl_trans *trans = file->private_data;
2651        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2652        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2653        u32 reset_flag;
2654        int ret;
2655
2656        ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
2657        if (ret)
2658                return ret;
2659        if (reset_flag == 0)
2660                memset(isr_stats, 0, sizeof(*isr_stats));
2661
2662        return count;
2663}
2664
/*
 * debugfs write handler: any write triggers a CSR register dump to the
 * kernel log; the written data itself is ignored.
 */
static ssize_t iwl_dbgfs_csr_write(struct file *file,
                                   const char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;

        iwl_pcie_dump_csr(trans);

        return count;
}
2675
2676static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2677                                     char __user *user_buf,
2678                                     size_t count, loff_t *ppos)
2679{
2680        struct iwl_trans *trans = file->private_data;
2681        char *buf = NULL;
2682        ssize_t ret;
2683
2684        ret = iwl_dump_fh(trans, &buf);
2685        if (ret < 0)
2686                return ret;
2687        if (!buf)
2688                return -EINVAL;
2689        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2690        kfree(buf);
2691        return ret;
2692}
2693
2694static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
2695                                     char __user *user_buf,
2696                                     size_t count, loff_t *ppos)
2697{
2698        struct iwl_trans *trans = file->private_data;
2699        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2700        char buf[100];
2701        int pos;
2702
2703        pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
2704                        trans_pcie->debug_rfkill,
2705                        !(iwl_read32(trans, CSR_GP_CNTRL) &
2706                                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
2707
2708        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2709}
2710
2711static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
2712                                      const char __user *user_buf,
2713                                      size_t count, loff_t *ppos)
2714{
2715        struct iwl_trans *trans = file->private_data;
2716        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2717        bool new_value;
2718        int ret;
2719
2720        ret = kstrtobool_from_user(user_buf, count, &new_value);
2721        if (ret)
2722                return ret;
2723        if (new_value == trans_pcie->debug_rfkill)
2724                return count;
2725        IWL_WARN(trans, "changing debug rfkill %d->%d\n",
2726                 trans_pcie->debug_rfkill, new_value);
2727        trans_pcie->debug_rfkill = new_value;
2728        iwl_pcie_handle_rfkill_irq(trans);
2729
2730        return count;
2731}
2732
/*
 * debugfs open handler for the monitor_data file.  Only usable when the
 * debug destination is DRAM (EXTERNAL_MODE); allows a single opener at
 * a time, tracked via fw_mon_data.state.
 */
static int iwl_dbgfs_monitor_data_open(struct inode *inode,
                                       struct file *file)
{
        struct iwl_trans *trans = inode->i_private;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (!trans->dbg.dest_tlv ||
            trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
                IWL_ERR(trans, "Debug destination is not set to DRAM\n");
                return -ENOENT;
        }

        /* enforce single-opener semantics */
        if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
                return -EBUSY;

        trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
        return simple_open(inode, file);
}
2751
/*
 * debugfs release handler for the monitor_data file: mark the file
 * closed again, unless it was force-disabled in the meantime (the
 * DISABLED state, set by the cleanup path, is preserved).
 */
static int iwl_dbgfs_monitor_data_release(struct inode *inode,
                                          struct file *file)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(inode->i_private);

        if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
                trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
        return 0;
}
2762
2763static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
2764                                  void *buf, ssize_t *size,
2765                                  ssize_t *bytes_copied)
2766{
2767        int buf_size_left = count - *bytes_copied;
2768
2769        buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2770        if (*size > buf_size_left)
2771                *size = buf_size_left;
2772
2773        *size -= copy_to_user(user_buf, buf, *size);
2774        *bytes_copied += *size;
2775
2776        if (buf_size_left == *size)
2777                return true;
2778        return false;
2779}
2780
/*
 * debugfs read handler for continuous firmware-monitor recording.
 * Tracks the device's write pointer and wrap counter between reads
 * (data->prev_wr_ptr / data->prev_wrap_cnt) and copies only the data
 * produced since the previous read.  Handles three cases: no wrap since
 * last read, exactly one wrap, and loss of sync (copy from the start).
 */
static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
                                           char __user *user_buf,
                                           size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf;
        struct cont_rec *data = &trans_pcie->fw_mon_data;
        u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
        ssize_t size, bytes_copied = 0;
        bool b_full;

        /* pick the PRPH addresses of the write pointer / wrap counter */
        if (trans->dbg.dest_tlv) {
                write_ptr_addr =
                        le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
                wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
        } else {
                write_ptr_addr = MON_BUFF_WRPTR;
                wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
        }

        if (unlikely(!trans->dbg.rec_on))
                return 0;

        mutex_lock(&data->mutex);
        if (data->state ==
            IWL_FW_MON_DBGFS_STATE_DISABLED) {
                mutex_unlock(&data->mutex);
                return 0;
        }

        /* write_ptr position in bytes rather then DW */
        write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
        wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);

        if (data->prev_wrap_cnt == wrap_cnt) {
                /* no wrap: copy [prev_wr_ptr, write_ptr) */
                size = write_ptr - data->prev_wr_ptr;
                curr_buf = cpu_addr + data->prev_wr_ptr;
                b_full = iwl_write_to_user_buf(user_buf, count,
                                               curr_buf, &size,
                                               &bytes_copied);
                data->prev_wr_ptr += size;

        } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
                   write_ptr < data->prev_wr_ptr) {
                /* single wrap: tail of the buffer first ... */
                size = trans->dbg.fw_mon[0].size - data->prev_wr_ptr;
                curr_buf = cpu_addr + data->prev_wr_ptr;
                b_full = iwl_write_to_user_buf(user_buf, count,
                                               curr_buf, &size,
                                               &bytes_copied);
                data->prev_wr_ptr += size;

                /* ... then the beginning up to write_ptr, if room remains */
                if (!b_full) {
                        size = write_ptr;
                        b_full = iwl_write_to_user_buf(user_buf, count,
                                                       cpu_addr, &size,
                                                       &bytes_copied);
                        data->prev_wr_ptr = size;
                        data->prev_wrap_cnt++;
                }
        } else {
                /* out of sync (missed wraps or overtaken): restart */
                if (data->prev_wrap_cnt == wrap_cnt - 1 &&
                    write_ptr > data->prev_wr_ptr)
                        IWL_WARN(trans,
                                 "write pointer passed previous write pointer, start copying from the beginning\n");
                else if (!unlikely(data->prev_wrap_cnt == 0 &&
                                   data->prev_wr_ptr == 0))
                        IWL_WARN(trans,
                                 "monitor data is out of sync, start copying from the beginning\n");

                size = write_ptr;
                b_full = iwl_write_to_user_buf(user_buf, count,
                                               cpu_addr, &size,
                                               &bytes_copied);
                data->prev_wr_ptr = size;
                data->prev_wrap_cnt = wrap_cnt;
        }

        mutex_unlock(&data->mutex);

        return bytes_copied;
}
2863
/* Instantiate the file_operations for each debugfs file */
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);

/* monitor_data needs custom open/release for single-opener tracking */
static const struct file_operations iwl_dbgfs_monitor_data_ops = {
        .read = iwl_dbgfs_monitor_data_read,
        .open = iwl_dbgfs_monitor_data_open,
        .release = iwl_dbgfs_monitor_data_release,
};
2876
/* Create the debugfs files and directories */
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
        struct dentry *dir = trans->dbgfs_dir;

        /* modes: 0400 read-only, 0200 write-only, 0600 read/write */
        DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
        DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
        DEBUGFS_ADD_FILE(interrupt, dir, 0600);
        DEBUGFS_ADD_FILE(csr, dir, 0200);
        DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
        DEBUGFS_ADD_FILE(rfkill, dir, 0600);
        DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
}
2890
/*
 * Force-disable the monitor_data debugfs stream.  Taking the mutex
 * ensures no reader is mid-copy when the state flips to DISABLED.
 */
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct cont_rec *data = &trans_pcie->fw_mon_data;

        mutex_lock(&data->mutex);
        data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
        mutex_unlock(&data->mutex);
}
2900#endif /*CONFIG_IWLWIFI_DEBUGFS */
2901
2902static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
2903{
2904        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2905        u32 cmdlen = 0;
2906        int i;
2907
2908        for (i = 0; i < trans_pcie->max_tbs; i++)
2909                cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
2910
2911        return cmdlen;
2912}
2913
/*
 * Copy the contents of pending receive buffers (RBs) into the firmware
 * error dump.  Walks the default RX queue from its read pointer up to
 * the closed RB number, temporarily unmapping each page for a coherent
 * CPU read and re-mapping it afterwards.  Returns bytes added to *data.
 */
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
                                   struct iwl_fw_error_dump_data **data,
                                   int allocated_rb_nums)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
        /* Dump RBs is supported only for pre-9000 devices (1 queue) */
        struct iwl_rxq *rxq = &trans_pcie->rxq[0];
        u32 i, r, j, rb_len = 0;

        spin_lock(&rxq->lock);

        /* low 12 bits of the status hold the closed RB number */
        r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;

        for (i = rxq->read, j = 0;
             i != r && j < allocated_rb_nums;
             i = (i + 1) & RX_QUEUE_MASK, j++) {
                struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
                struct iwl_fw_error_dump_rb *rb;

                /* unmap so the CPU sees the device's latest writes */
                dma_unmap_page(trans->dev, rxb->page_dma, max_len,
                               DMA_FROM_DEVICE);

                rb_len += sizeof(**data) + sizeof(*rb) + max_len;

                (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
                (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
                rb = (void *)(*data)->data;
                rb->index = cpu_to_le32(i);
                memcpy(rb->data, page_address(rxb->page), max_len);
                /* remap the page for the free benefit */
                /*
                 * NOTE(review): the dma_map_page() result is not checked
                 * with dma_mapping_error() — presumably acceptable on an
                 * error-dump path, but worth confirming.
                 */
                rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
                                                     max_len,
                                                     DMA_FROM_DEVICE);

                *data = iwl_fw_error_next_data(*data);
        }

        spin_unlock(&rxq->lock);

        return rb_len;
}
2956#define IWL_CSR_TO_DUMP (0x250)
2957
2958static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
2959                                   struct iwl_fw_error_dump_data **data)
2960{
2961        u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
2962        __le32 *val;
2963        int i;
2964
2965        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
2966        (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
2967        val = (void *)(*data)->data;
2968
2969        for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
2970                *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2971
2972        *data = iwl_fw_error_next_data(*data);
2973
2974        return csr_len;
2975}
2976
/*
 * Snapshot the FH (flow handler) register range into the firmware error
 * dump.  Pre-gen2 devices read directly from CSR space; gen2 reads via
 * the (device-family-adjusted) UMAC PRPH window.  Requires NIC access;
 * returns 0 if it cannot be grabbed, otherwise bytes added to *data.
 */
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
                                       struct iwl_fw_error_dump_data **data)
{
        u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
        unsigned long flags;
        __le32 *val;
        int i;

        if (!iwl_trans_grab_nic_access(trans, &flags))
                return 0;

        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
        (*data)->len = cpu_to_le32(fh_regs_len);
        val = (void *)(*data)->data;

        if (!trans->cfg->gen2)
                for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
                     i += sizeof(u32))
                        *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
        else
                for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
                     i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
                     i += sizeof(u32))
                        *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
                                                                      i));

        iwl_trans_release_nic_access(trans, &flags);

        *data = iwl_fw_error_next_data(*data);

        return sizeof(**data) + fh_regs_len;
}
3009
/*
 * Read @monitor_len bytes of MARBH monitor data into the dump buffer,
 * one DW at a time, via the DMARB read-control/read-data registers.
 * Requires NIC access; returns 0 if it cannot be grabbed, otherwise
 * monitor_len.
 */
static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
                                 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
                                 u32 monitor_len)
{
        u32 buf_size_in_dwords = (monitor_len >> 2);
        u32 *buffer = (u32 *)fw_mon_data->data;
        unsigned long flags;
        u32 i;

        if (!iwl_trans_grab_nic_access(trans, &flags))
                return 0;

        /* enable DMARB read mode, drain the data register, then disable */
        iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
        for (i = 0; i < buf_size_in_dwords; i++)
                buffer[i] = iwl_read_umac_prph_no_grab(trans,
                                                       MON_DMARB_RD_DATA_ADDR);
        iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

        iwl_trans_release_nic_access(trans, &flags);

        return monitor_len;
}
3033
/*
 * Fill the firmware-monitor dump header with the current base address,
 * write pointer and wrap count.  The register addresses depend on the
 * device family (AX210+ uses DBGC registers, including a high base
 * address), or on the debug destination TLV when present, falling back
 * to the legacy MON_BUFF registers otherwise.
 */
static void
iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
                             struct iwl_fw_error_dump_fw_mon *fw_mon_data)
{
        u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;

        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
                base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
                write_ptr = DBGC_CUR_DBGBUF_STATUS;
                wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
        } else if (trans->dbg.dest_tlv) {
                write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
                wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
                base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
        } else {
                base = MON_BUFF_BASE_ADDR;
                write_ptr = MON_BUFF_WRPTR;
                wrap_cnt = MON_BUFF_CYCLE_CNT;
        }

        write_ptr_val = iwl_read_prph(trans, write_ptr);
        fw_mon_data->fw_mon_cycle_cnt =
                cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
        fw_mon_data->fw_mon_base_ptr =
                cpu_to_le32(iwl_read_prph(trans, base));
        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                /* AX210+: also record the high base DW and mask the
                 * write pointer down to the buffer offset field */
                fw_mon_data->fw_mon_base_high_ptr =
                        cpu_to_le32(iwl_read_prph(trans, base_high));
                write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
        }
        fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
}
3067
/*
 * Dump the firmware monitor (debug log) buffer into the error dump.
 *
 * @trans: the transport
 * @data: current write position in the dump; advanced implicitly by the
 *	caller via the returned length
 * @monitor_len: pre-computed capacity for monitor data (from
 *	iwl_trans_get_fw_monitor_len())
 *
 * Writes one IWL_FW_ERROR_DUMP_FW_MONITOR chunk (pointers header plus
 * the monitor contents) and returns the number of bytes produced, or 0
 * if no monitor is available for this configuration.
 */
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	u32 len = 0;

	/*
	 * Monitor data exists either when a destination TLV configured
	 * it, or when a DRAM block was allocated by the driver (7000
	 * family, or AX210 and later).
	 */
	if (trans->dbg.dest_tlv ||
	    (trans->dbg.num_blocks &&
	     (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
	      trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;

		/* fill in base/write-pointer/wrap-count registers */
		iwl_trans_pcie_dump_pointers(trans, fw_mon_data);

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (trans->dbg.num_blocks) {
			/* driver-allocated DRAM buffer: copy it directly */
			memcpy(fw_mon_data->data,
			       trans->dbg.fw_mon[0].block,
			       trans->dbg.fw_mon[0].size);

			monitor_len = trans->dbg.fw_mon[0].size;
		} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
			u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			if (trans->dbg.dest_tlv->version) {
				base = (iwl_read_prph(trans, base) &
					IWL_LDBG_M2S_BUF_BA_MSK) <<
				       trans->dbg.dest_tlv->base_shift;
				base *= IWL_M2S_UNIT_SIZE;
				base += trans->cfg->smem_offset;
			} else {
				base = iwl_read_prph(trans, base) <<
				       trans->dbg.dest_tlv->base_shift;
			}

			/* monitor lives in SMEM: read it out word by word */
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}
3128
/*
 * Compute how much space the firmware monitor will need in the dump.
 *
 * @trans: the transport
 * @len: running total dump size; increased by the monitor chunk size
 *	(header + monitor payload) when a monitor exists
 *
 * Returns the monitor payload size in bytes (0 if no monitor), which
 * the caller later passes to iwl_trans_pcie_dump_monitor().
 */
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
	if (trans->dbg.num_blocks) {
		/* driver-allocated DRAM buffer: size is known directly */
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			trans->dbg.fw_mon[0].size;
		return trans->dbg.fw_mon[0].size;
	} else if (trans->dbg.dest_tlv) {
		u32 base, end, cfg_reg, monitor_len;

		if (trans->dbg.dest_tlv->version == 1) {
			/*
			 * v1 TLV: one config register encodes both the
			 * buffer base and its size, in M2S units.
			 */
			cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			cfg_reg = iwl_read_prph(trans, cfg_reg);
			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
				trans->dbg.dest_tlv->base_shift;
			base *= IWL_M2S_UNIT_SIZE;
			base += trans->cfg->smem_offset;

			monitor_len =
				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
				trans->dbg.dest_tlv->end_shift;
			monitor_len *= IWL_M2S_UNIT_SIZE;
		} else {
			/* v0 TLV: separate base/end registers */
			base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);

			base = iwl_read_prph(trans, base) <<
			       trans->dbg.dest_tlv->base_shift;
			end = iwl_read_prph(trans, end) <<
			      trans->dbg.dest_tlv->end_shift;

			/* Make "end" point to the actual end */
			if (trans->cfg->device_family >=
			    IWL_DEVICE_FAMILY_8000 ||
			    trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
				end += (1 << trans->dbg.dest_tlv->end_shift);
			monitor_len = end - base;
		}
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			monitor_len;
		return monitor_len;
	}
	return 0;
}
3174
/*
 * Collect a transport-level error dump.
 *
 * @trans: the transport
 * @dump_mask: bitmask of IWL_FW_ERROR_DUMP_* chunk types to include
 *
 * Two-pass algorithm: first compute an upper bound on the total size of
 * all requested chunks, allocate one vzalloc'ed buffer, then fill it.
 * The fill order must mirror the size computation so the buffer cannot
 * overflow. Returns the dump (caller frees with vfree via dump_data),
 * or NULL if nothing was requested or allocation failed.
 */
static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			  u32 dump_mask)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs = 0, monitor_len = 0;
	int i, ptr;
	/* RB dump only on FW error, single-queue (non-MQ) HW, if requested */
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
			!trans->cfg->mq_rx_supported &&
			dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);

	if (!dump_mask)
		return NULL;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
		len += sizeof(*data) +
			cmdq->n_window * (sizeof(*txcmd) +
					  TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);

	/* CSR registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* FH registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
		if (trans->cfg->gen2)
			len += sizeof(*data) +
			       (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
				iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
		else
			len += sizeof(*data) +
			       (FH_MEM_UPPER_BOUND -
				FH_MEM_LOWER_BOUND);
	}

	if (dump_rbs) {
		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
		/* RBs */
		num_rbs =
			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
			& 0x0FFF;
		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	/* Paged memory for gen2 HW */
	if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
		for (i = 0; i < trans->init_dram.paging_cnt; i++)
			len += sizeof(*data) +
			       sizeof(struct iwl_fw_error_dump_paging) +
			       trans->init_dram.paging[i].size;

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	/* second pass: fill, tracking the actual (possibly smaller) size */
	len = 0;
	data = (void *)dump_data->data;

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
		u16 tfd_size = trans_pcie->tfd_size;

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
		txcmd = (void *)data->data;
		spin_lock_bh(&cmdq->lock);
		/* walk backwards from the write pointer over the window */
		ptr = cmdq->write_ptr;
		for (i = 0; i < cmdq->n_window; i++) {
			u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
			u32 caplen, cmdlen;

			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
							   cmdq->tfds +
							   tfd_size * ptr);
			/* cap each command's captured payload */
			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

			if (cmdlen) {
				len += sizeof(*txcmd) + caplen;
				txcmd->cmdlen = cpu_to_le32(cmdlen);
				txcmd->caplen = cpu_to_le32(caplen);
				memcpy(txcmd->data, cmdq->entries[idx].cmd,
				       caplen);
				txcmd = (void *)((u8 *)txcmd->data + caplen);
			}

			ptr = iwl_queue_dec_wrap(trans, ptr);
		}
		spin_unlock_bh(&cmdq->lock);

		data->len = cpu_to_le32(len);
		len += sizeof(*data);
		data = iwl_fw_error_next_data(data);
	}

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += iwl_trans_pcie_dump_csr(trans, &data);
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	/* Paged memory for gen2 HW */
	if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
			struct iwl_fw_error_dump_paging *paging;
			u32 page_len = trans->init_dram.paging[i].size;

			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			data->len = cpu_to_le32(sizeof(*paging) + page_len);
			paging = (void *)data->data;
			paging->index = cpu_to_le32(i);
			memcpy(paging->data,
			       trans->init_dram.paging[i].block, page_len);
			data = iwl_fw_error_next_data(data);

			len += sizeof(*data) + sizeof(*paging) + page_len;
		}
	}
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}
3314
3315#ifdef CONFIG_PM_SLEEP
3316static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
3317{
3318        if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
3319            (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
3320                return iwl_pci_fw_enter_d0i3(trans);
3321
3322        return 0;
3323}
3324
3325static void iwl_trans_pcie_resume(struct iwl_trans *trans)
3326{
3327        if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
3328            (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
3329                iwl_pci_fw_exit_d0i3(trans);
3330}
3331#endif /* CONFIG_PM_SLEEP */
3332
/*
 * Ops shared between the gen1 and gen2 PCIe transports: register and
 * memory access, NIC-access grab/release, PM refcounting, error dump,
 * D3 suspend/resume and NMI synchronization. Note: the expansion ends
 * WITHOUT a trailing comma — users write "IWL_TRANS_COMMON_OPS," in
 * the struct initializer.
 */
#define IWL_TRANS_COMMON_OPS                                            \
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,                  \
	.write8 = iwl_trans_pcie_write8,                                \
	.write32 = iwl_trans_pcie_write32,                              \
	.read32 = iwl_trans_pcie_read32,                                \
	.read_prph = iwl_trans_pcie_read_prph,                          \
	.write_prph = iwl_trans_pcie_write_prph,                        \
	.read_mem = iwl_trans_pcie_read_mem,                            \
	.write_mem = iwl_trans_pcie_write_mem,                          \
	.configure = iwl_trans_pcie_configure,                          \
	.set_pmi = iwl_trans_pcie_set_pmi,                              \
	.sw_reset = iwl_trans_pcie_sw_reset,                            \
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,              \
	.release_nic_access = iwl_trans_pcie_release_nic_access,        \
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,                  \
	.ref = iwl_trans_pcie_ref,                                      \
	.unref = iwl_trans_pcie_unref,                                  \
	.dump_data = iwl_trans_pcie_dump_data,                          \
	.d3_suspend = iwl_trans_pcie_d3_suspend,                        \
	.d3_resume = iwl_trans_pcie_d3_resume,                          \
	.sync_nmi = iwl_trans_pcie_sync_nmi
3354
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend/resume ops; empty when CONFIG_PM_SLEEP is off. The
 * non-empty expansion ends WITH a trailing comma, so users write
 * "IWL_TRANS_PM_OPS" (no comma) followed directly by the next member.
 */
#define IWL_TRANS_PM_OPS                                                \
	.suspend = iwl_trans_pcie_suspend,                              \
	.resume = iwl_trans_pcie_resume,
#else
#define IWL_TRANS_PM_OPS
#endif /* CONFIG_PM_SLEEP */
3362
/* Transport ops for pre-22000 ("gen1") devices. */
static const struct iwl_trans_ops trans_ops_pcie = {
	IWL_TRANS_COMMON_OPS,
	IWL_TRANS_PM_OPS
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,

	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,

	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};
3389
/*
 * Transport ops for 22000-family and later ("gen2") devices: same
 * common/PM ops, but gen2 start/stop/tx/hcmd paths and dynamic TX
 * queue allocation instead of the static enable/disable API.
 */
static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
	IWL_TRANS_COMMON_OPS,
	IWL_TRANS_PM_OPS
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
	.start_fw = iwl_trans_pcie_gen2_start_fw,
	.stop_device = iwl_trans_pcie_gen2_stop_device,

	.send_cmd = iwl_trans_pcie_gen2_send_hcmd,

	.tx = iwl_trans_pcie_gen2_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
	.txq_free = iwl_trans_pcie_dyn_txq_free,
	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};
3411
3412struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3413                                       const struct pci_device_id *ent,
3414                                       const struct iwl_cfg *cfg)
3415{
3416        struct iwl_trans_pcie *trans_pcie;
3417        struct iwl_trans *trans;
3418        int ret, addr_size;
3419
3420        ret = pcim_enable_device(pdev);
3421        if (ret)
3422                return ERR_PTR(ret);
3423
3424        if (cfg->gen2)
3425                trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
3426                                        &pdev->dev, cfg, &trans_ops_pcie_gen2);
3427        else
3428                trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
3429                                        &pdev->dev, cfg, &trans_ops_pcie);
3430        if (!trans)
3431                return ERR_PTR(-ENOMEM);
3432
3433        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3434
3435        trans_pcie->trans = trans;
3436        trans_pcie->opmode_down = true;
3437        spin_lock_init(&trans_pcie->irq_lock);
3438        spin_lock_init(&trans_pcie->reg_lock);
3439        mutex_init(&trans_pcie->mutex);
3440        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3441        trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
3442        if (!trans_pcie->tso_hdr_page) {
3443                ret = -ENOMEM;
3444                goto out_no_pci;
3445        }
3446        trans_pcie->debug_rfkill = -1;
3447
3448        if (!cfg->base_params->pcie_l1_allowed) {
3449                /*
3450                 * W/A - seems to solve weird behavior. We need to remove this
3451                 * if we don't want to stay in L1 all the time. This wastes a
3452                 * lot of power.
3453                 */
3454                pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
3455                                       PCIE_LINK_STATE_L1 |
3456                                       PCIE_LINK_STATE_CLKPM);
3457        }
3458
3459        trans_pcie->def_rx_queue = 0;
3460
3461        if (cfg->use_tfh) {
3462                addr_size = 64;
3463                trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
3464                trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
3465        } else {
3466                addr_size = 36;
3467                trans_pcie->max_tbs = IWL_NUM_OF_TBS;
3468                trans_pcie->tfd_size = sizeof(struct iwl_tfd);
3469        }
3470        trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
3471
3472        pci_set_master(pdev);
3473
3474        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
3475        if (!ret)
3476                ret = pci_set_consistent_dma_mask(pdev,
3477                                                  DMA_BIT_MASK(addr_size));
3478        if (ret) {
3479                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3480                if (!ret)
3481                        ret = pci_set_consistent_dma_mask(pdev,
3482                                                          DMA_BIT_MASK(32));
3483                /* both attempts failed: */
3484                if (ret) {
3485                        dev_err(&pdev->dev, "No suitable DMA available\n");
3486                        goto out_no_pci;
3487                }
3488        }
3489
3490        ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
3491        if (ret) {
3492                dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
3493                goto out_no_pci;
3494        }
3495
3496        trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
3497        if (!trans_pcie->hw_base) {
3498                dev_err(&pdev->dev, "pcim_iomap_table failed\n");
3499                ret = -ENODEV;
3500                goto out_no_pci;
3501        }
3502
3503        /* We disable the RETRY_TIMEOUT register (0x41) to keep
3504         * PCI Tx retries from interfering with C3 CPU state */
3505        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3506
3507        trans_pcie->pci_dev = pdev;
3508        iwl_disable_interrupts(trans);
3509
3510        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
3511        if (trans->hw_rev == 0xffffffff) {
3512                dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
3513                ret = -EIO;
3514                goto out_no_pci;
3515        }
3516
3517        /*
3518         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
3519         * changed, and now the revision step also includes bit 0-1 (no more
3520         * "dash" value). To keep hw_rev backwards compatible - we'll store it
3521         * in the old format.
3522         */
3523        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
3524                unsigned long flags;
3525
3526                trans->hw_rev = (trans->hw_rev & 0xfff0) |
3527                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
3528
3529                ret = iwl_pcie_prepare_card_hw(trans);
3530                if (ret) {
3531                        IWL_WARN(trans, "Exit HW not ready\n");
3532                        goto out_no_pci;
3533                }
3534
3535                /*
3536                 * in-order to recognize C step driver should read chip version
3537                 * id located at the AUX bus MISC address space.
3538                 */
3539                ret = iwl_finish_nic_init(trans);
3540                if (ret)
3541                        goto out_no_pci;
3542
3543                if (iwl_trans_grab_nic_access(trans, &flags)) {
3544                        u32 hw_step;
3545
3546                        hw_step = iwl_read_umac_prph_no_grab(trans,
3547                                                             WFPM_CTRL_REG);
3548                        hw_step |= ENABLE_WFPM;
3549                        iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG,
3550                                                    hw_step);
3551                        hw_step = iwl_read_prph_no_grab(trans,
3552                                                        CNVI_AUX_MISC_CHIP);
3553                        hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
3554                        if (hw_step == 0x3)
3555                                trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
3556                                                (SILICON_C_STEP << 2);
3557                        iwl_trans_release_nic_access(trans, &flags);
3558                }
3559        }
3560
3561        IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
3562
3563#if IS_ENABLED(CONFIG_IWLMVM)
3564        trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
3565
3566        if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
3567                if (trans->hw_rev == CSR_HW_REV_TYPE_TY) {
3568                        trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
3569                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3570                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
3571                        trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
3572                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3573                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
3574                        trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
3575                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3576                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
3577                        trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
3578                }
3579        } else if (cfg == &iwl_ax101_cfg_qu_hr) {
3580                if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3581                     CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
3582                     trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
3583                    (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3584                     CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
3585                        trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
3586                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3587                    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
3588                        trans->cfg = &iwl_ax101_cfg_qu_hr;
3589                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3590                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
3591                        trans->cfg = &iwl22000_2ax_cfg_jf;
3592                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3593                           CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
3594                        IWL_ERR(trans, "RF ID HRCDB is not supported\n");
3595                        ret = -EINVAL;
3596                        goto out_no_pci;
3597                } else {
3598                        IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n",
3599                                CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id));
3600                        ret = -EINVAL;
3601                        goto out_no_pci;
3602                }
3603        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3604                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
3605                   trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
3606                u32 hw_status;
3607
3608                hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
3609                if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
3610                        /*
3611                        * b step fw is the same for physical card and fpga
3612                        */
3613                        trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
3614                else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
3615                         CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
3616                        trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
3617                } else {
3618                        /*
3619                        * a step no FPGA
3620                        */
3621                        trans->cfg = &iwl22000_2ac_cfg_hr;
3622                }
3623        }
3624#endif
3625
3626        iwl_pcie_set_interrupt_capa(pdev, trans);
3627        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
3628        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
3629                 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
3630
3631        /* Initialize the wait queue for commands */
3632        init_waitqueue_head(&trans_pcie->wait_command_queue);
3633
3634        init_waitqueue_head(&trans_pcie->d0i3_waitq);
3635
3636        if (trans_pcie->msix_enabled) {
3637                ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
3638                if (ret)
3639                        goto out_no_pci;
3640         } else {
3641                ret = iwl_pcie_alloc_ict(trans);
3642                if (ret)
3643                        goto out_no_pci;
3644
3645                ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
3646                                                iwl_pcie_isr,
3647                                                iwl_pcie_irq_handler,
3648                                                IRQF_SHARED, DRV_NAME, trans);
3649                if (ret) {
3650                        IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
3651                        goto out_free_ict;
3652                }
3653                trans_pcie->inta_mask = CSR_INI_SET_MASK;
3654         }
3655
3656        trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3657                                                   WQ_HIGHPRI | WQ_UNBOUND, 1);
3658        INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3659
3660#ifdef CONFIG_IWLWIFI_PCIE_RTPM
3661        trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
3662#else
3663        trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
3664#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
3665
3666#ifdef CONFIG_IWLWIFI_DEBUGFS
3667        trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3668        mutex_init(&trans_pcie->fw_mon_data.mutex);
3669#endif
3670
3671        return trans;
3672
3673out_free_ict:
3674        iwl_pcie_free_ict(trans);
3675out_no_pci:
3676        free_percpu(trans_pcie->tso_hdr_page);
3677        iwl_trans_free(trans);
3678        return ERR_PTR(ret);
3679}
3680
/*
 * Force an NMI in the firmware and wait (busy-poll, up to
 * IWL_TRANS_NMI_TIMEOUT) until the resulting SW error interrupt cause
 * is latched, then report the error to the op-mode.
 *
 * Host interrupts are masked during the poll so the error is observed
 * here by reading the cause register directly rather than in the ISR;
 * the previous interrupt-enable state is restored afterwards.
 */
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
	bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
	u32 inta_addr, sw_err_bit;

	/* the cause register/bit differ between MSI-X and legacy INTA */
	if (trans_pcie->msix_enabled) {
		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
		sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
	} else {
		inta_addr = CSR_INT;
		sw_err_bit = CSR_INT_BIT_SW_ERR;
	}

	/* if the interrupts were already disabled, there is no point in
	 * calling iwl_disable_interrupts
	 */
	if (interrupts_enabled)
		iwl_disable_interrupts(trans);

	iwl_force_nmi(trans);
	while (time_after(timeout, jiffies)) {
		u32 inta_hw = iwl_read32(trans, inta_addr);

		/* Error detected by uCode */
		if (inta_hw & sw_err_bit) {
			/* Clear causes register */
			iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
			break;
		}

		mdelay(1);
	}

	/* enable interrupts only if there were already enabled before this
	 * function to avoid a case were the driver enable interrupts before
	 * proper configurations were made
	 */
	if (interrupts_enabled)
		iwl_enable_interrupts(trans);

	iwl_trans_fw_error(trans);
}
3725