linux/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "queue/tx.h"

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/*
 * This file includes the declarations that are internal to the
 * trans_pcie layer.
 */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *      this buffer uses (if multiple RBs fit into one page)
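 * @list: list entry linking this RBD into the queue's free/used lists or
 *      the allocator's lists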
 */
struct iwl_rx_mem_buffer {
        dma_addr_t page_dma;
        struct page *page;
        u16 vid;
        bool invalid;
        struct list_head list;
        u32 offset;
};

/**
 * struct isr_statistics - interrupt statistics
 *
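 * @hw: number of HW error interrupts
 * @sw: number of uCode-detected SW error interrupts
 * @err_code: last SW error code that triggered a restart
 * @sch: number of scheduler interrupts
 * @alive: number of alive interrupts
 * @rfkill: number of RF-kill interrupts
 * @ctkill: number of CT-kill (thermal) interrupts
 * @wakeup: number of wakeup interrupts
 * @rx: number of Rx interrupts
 * @tx: number of Tx interrupts
 * @unhandled: number of unhandled interrupts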
 */
struct isr_statistics {
        u32 hw;
        u32 sw;
        u32 err_code;
        u32 sch;
        u32 alive;
        u32 rfkill;
        u32 ctkill;
        u32 wakeup;
        u32 rx;
        u32 tx;
        u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
        __le16 rbid;
        __le16 reserved[3];
        __le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED      BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
        __le32 reserved1;
        __le16 rbid;
        u8 flags;
        u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *      Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *      In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to allocator to use for allocation
 * @write_actual: last write index actually written to the device (aligned
 *      down to a multiple of 8)
 * @queue_size: size of this queue, in RBDs
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's indexes, lists and buffers
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *      the fragmented flag, so the next one is still another fragment
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
        int id;
        void *bd;
        dma_addr_t bd_dma;
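        /* used (completion) BD ring; layout depends on the device family */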
        union {
                void *used_bd;
                __le32 *bd_32;
                struct iwl_rx_completion_desc *cd;
        };
        dma_addr_t used_bd_dma;
        __le16 *tr_tail;
        dma_addr_t tr_tail_dma;
        __le16 *cr_tail;
        dma_addr_t cr_tail_dma;
        u32 read;
        u32 write;
        u32 free_count;
        u32 used_count;
        u32 write_actual;
        u32 queue_size;
        struct list_head rx_free;
        struct list_head rx_used;
        bool need_update, next_rb_is_fragment;
        void *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
        struct napi_struct napi;
        struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *      the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *      of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
        atomic_t req_pending;
        atomic_t req_ready;
        struct list_head rbd_allocated;
        struct list_head rbd_empty;
        spinlock_t lock;
        struct workqueue_struct *alloc_wq;
        struct work_struct rx_alloc;
};

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
                                            struct iwl_rxq *rxq)
{
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                __le16 *rb_stts = rxq->rb_stts;

                return READ_ONCE(*rb_stts);
        } else {
                struct iwl_rb_status *rb_stts = rxq->rb_stts;

                return READ_ONCE(rb_stts->closed_rb_num);
        }
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *      set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
        IWL_FW_MON_DBGFS_STATE_CLOSED,
        IWL_FW_MON_DBGFS_STATE_OPEN,
        IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
        IWL_SHARED_IRQ_NON_RX           = BIT(0),
        IWL_SHARED_IRQ_FIRST_RSS        = BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
        IWL_IMAGE_RESP_DEF              = 0,
        IWL_IMAGE_RESP_SUCCESS          = 1,
        IWL_IMAGE_RESP_FAIL             = 2,
};

/**
 * struct cont_rec - continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *      debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *      monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *      in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
        u32 prev_wr_ptr;
        u32 prev_wrap_cnt;
        u8  state;
        /* Used to sync monitor_data debugfs file with driver unload flow */
        struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *      Context information addresses will be taken from here.
 *      This is driver's local copy for keeping track of size and
 *      count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @kw: keep warm address
 * @pnvm_dram: DRAM area that contains the PNVM data
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_hold_nic_awake: whether the NIC is kept awake while a host command
 *      is in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *      (see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *      will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
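 * @napi_dev: dummy netdev used for NAPI registration
 * @sx_complete: set when a suspend/resume (Sx) transition has completed
 * @sx_waitq: wait queue for Sx transitions
 * @fw_reset_handshake: indicates the FW reset handshake is needed when
 *      stopping the device
 * @fw_reset_done: set when the FW reset handshake has completed
 * @fw_reset_waitq: wait queue for the FW reset handshake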
 */
struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
        struct iwl_rx_mem_buffer *rx_pool;
        struct iwl_rx_mem_buffer **global_table;
        struct iwl_rb_allocator rba;
        union {
                struct iwl_context_info *ctxt_info;
                struct iwl_context_info_gen3 *ctxt_info_gen3;
        };
        struct iwl_prph_info *prph_info;
        struct iwl_prph_scratch *prph_scratch;
        dma_addr_t ctxt_info_dma_addr;
        dma_addr_t prph_info_dma_addr;
        dma_addr_t prph_scratch_dma_addr;
        dma_addr_t iml_dma_addr;
        struct iwl_trans *trans;

        struct net_device napi_dev;

        /* INT ICT Table */
        __le32 *ict_tbl;
        dma_addr_t ict_tbl_dma;
        int ict_index;
        bool use_ict;
        bool is_down, opmode_down;
        s8 debug_rfkill;
        struct isr_statistics isr_stats;

        spinlock_t irq_lock;
        struct mutex mutex;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr kw;

        struct iwl_dram_data pnvm_dram;

        struct iwl_txq *txq_memory;

        /* PCI bus related data */
        struct pci_dev *pci_dev;
        void __iomem *hw_base;

        bool ucode_write_complete;
        bool sx_complete;
        wait_queue_head_t ucode_write_waitq;
        wait_queue_head_t wait_command_queue;
        wait_queue_head_t sx_waitq;

        u8 def_rx_queue;
        u8 n_no_reclaim_cmds;
        u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
        u16 num_rx_bufs;

        enum iwl_amsdu_size rx_buf_size;
        bool scd_set_active;
        bool pcie_dbg_dumped_once;
        u32 rx_page_order;
        u32 rx_buf_bytes;
        u32 supported_dma_mask;

        /* allocator lock for the two values below */
        spinlock_t alloc_page_lock;
        struct page *alloc_page;
        u32 alloc_page_used;

        /* protect hw register */
        spinlock_t reg_lock;
        bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
        struct cont_rec fw_mon_data;
#endif

        struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
        bool msix_enabled;
        u8 shared_vec_mask;
        u32 alloc_vecs;
        u32 def_irq;
        u32 fh_init_mask;
        u32 hw_init_mask;
        u32 fh_mask;
        u32 hw_mask;
        cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
        u16 tx_cmd_queue_size;
        bool in_rescan;

        void *base_rb_stts;
        dma_addr_t base_rb_stts_dma;

        bool fw_reset_handshake;
        bool fw_reset_done;
        wait_queue_head_t fw_reset_waitq;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
        return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
                                      struct msix_entry *entry)
{
        /*
         * Before sending the interrupt the HW disables it to prevent
         * a nested interrupt. This is done by writing 1 to the corresponding
         * bit in the mask register. After handling the interrupt, it should be
         * re-enabled by clearing this bit. The register is defined as a
         * write-1-to-clear (W1C) register, meaning that the bit is cleared
         * by writing 1 to it.
         */
        iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
        return container_of((void *)trans_pcie, struct iwl_trans,
                            trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *      Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
                      const struct pci_device_id *ent,
                      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
                            struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
                               const struct iwl_trans_txq_scd_cfg *cfg,
                               unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
                                bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
                                        bool shared_mode);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                            struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        clear_bit(STATUS_INT_ENABLED, &trans->status);
        if (!trans_pcie->msix_enabled) {
                /* disable interrupts from uCode/NIC to host */
                iwl_write32(trans, CSR_INT_MASK, 0x00000000);

                /* acknowledge/clear/reset any interrupts still pending
                 * from uCode or flow handler (Rx/Tx DMA) */
                iwl_write32(trans, CSR_INT, 0xffffffff);
                iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
        } else {
                /* disable all the interrupts we might use */
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
                            trans_pcie->fh_init_mask);
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
                            trans_pcie->hw_init_mask);
        }
        IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS     31
#define IWL_NUM_OF_TRANSFER_RINGS       527

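/*
 * Count the number of consecutive firmware sections starting at @start,
 * stopping at a CPU1/CPU2 or paging separator section.
 */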
static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
                                            int start)
{
        int i = 0;

        while (start < fw->num_sec &&
               fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
               fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
                start++;
                i++;
        }

        return i;
}

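/*
 * Free the DMA memory that was allocated for the firmware image sections
 * used by the context-info based init flow.
 */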
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
        struct iwl_self_init_dram *dram = &trans->init_dram;
        int i;

        if (!dram->fw) {
                WARN_ON(dram->fw_cnt);
                return;
        }

        for (i = 0; i < dram->fw_cnt; i++)
                dma_free_coherent(trans->dev, dram->fw[i].size,
                                  dram->fw[i].block, dram->fw[i].physical);

        kfree(dram->fw);
        dram->fw_cnt = 0;
        dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        _iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
        set_bit(STATUS_INT_ENABLED, &trans->status);
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INI_SET_MASK;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                /*
                 * fh/hw_mask keep all the unmasked causes.
                 * Unlike MSI, with MSI-X a cause is enabled when its bit in
                 * the mask register is unset.
                 */
                trans_pcie->hw_mask = trans_pcie->hw_init_mask;
                trans_pcie->fh_mask = trans_pcie->fh_init_mask;
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
                            ~trans_pcie->fh_mask);
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
                            ~trans_pcie->hw_mask);
        }
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        _iwl_enable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
        trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
        trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
                            trans_pcie->hw_init_mask);
                iwl_enable_fh_int_msk_msix(trans,
                                           MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
        }
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

        if (!trans_pcie->msix_enabled) {
                /*
                 * When we receive the ALIVE interrupt, the ISR will call
                 * iwl_enable_fw_load_int_ctx_info again to re-enable the
                 * ALIVE interrupt (which is not really needed anymore) and
                 * also the RX interrupt, which will allow us to receive the
                 * ALIVE notification (which is Rx) and continue the flow.
                 */
                trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_enable_hw_int_msk_msix(trans,
                                           MSIX_HW_INT_CAUSES_REG_ALIVE);
                /*
                 * Leave all the FH causes enabled to get the ALIVE
                 * notification.
                 */
                iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
        }
}

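/* build the IRQ name for a given MSI-X vector index */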
static inline const char *queue_name(struct device *dev,
                                     struct iwl_trans_pcie *trans_p, int i)
{
        if (trans_p->shared_vec_mask) {
                int vec = trans_p->shared_vec_mask &
                          IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

                if (i == 0)
                        return DRV_NAME ": shared IRQ";

                return devm_kasprintf(dev, GFP_KERNEL,
                                      DRV_NAME ": queue %d", i + vec);
        }
        if (i == 0)
                return DRV_NAME ": default queue";

        if (i == trans_p->alloc_vecs - 1)
                return DRV_NAME ": exception";

        return devm_kasprintf(dev, GFP_KERNEL,
                              DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
                            trans_pcie->fh_init_mask);
                iwl_enable_hw_int_msk_msix(trans,
                                           MSIX_HW_INT_CAUSES_REG_RF_KILL);
        }

        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
                /*
                 * On 9000-series devices this bit isn't enabled by default,
                 * so when we power down the device we need to set the bit to
                 * allow it to wake up the PCI-E bus for RF-kill interrupts.
                 */
                iwl_set_bit(trans, CSR_GP_CNTRL,
                            CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
        }
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        lockdep_assert_held(&trans_pcie->mutex);

        if (trans_pcie->debug_rfkill == 1)
                return true;

        return !(iwl_read32(trans, CSR_GP_CNTRL) &
                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
                                                  u32 reg, u32 mask, u32 value)
{
        u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
        WARN_ON_ONCE(value & ~mask);
#endif

        v = iwl_read32(trans, reg);
        v &= ~mask;
        v |= value;
        iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
                                              u32 reg, u32 mask)
{
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
                                            u32 reg, u32 mask)
{
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
        return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
                                       bool was_in_rfkill);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
                           struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
                                 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
                                  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
                                  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */