1
2
3
4
5
6
7#ifndef __iwl_trans_int_pcie_h__
8#define __iwl_trans_int_pcie_h__
9
10#include <linux/spinlock.h>
11#include <linux/interrupt.h>
12#include <linux/skbuff.h>
13#include <linux/wait.h>
14#include <linux/pci.h>
15#include <linux/timer.h>
16#include <linux/cpu.h>
17
18#include "iwl-fh.h"
19#include "iwl-csr.h"
20#include "iwl-trans.h"
21#include "iwl-debug.h"
22#include "iwl-io.h"
23#include "iwl-op-mode.h"
24#include "iwl-drv.h"
25#include "queue/tx.h"
26
27
28
29
30#define RX_NUM_QUEUES 1
31#define RX_POST_REQ_ALLOC 2
32#define RX_CLAIM_REQ_ALLOC 8
33#define RX_PENDING_WATERMARK 16
34#define FIRST_RX_QUEUE 512
35
36struct iwl_host_cmd;
37
38
39
40
41
42
43
44
45
46
47
48
49
50
/**
 * struct iwl_rx_mem_buffer - driver-side bookkeeping for one RX buffer (RBD)
 * @page_dma: bus (DMA) address of @page as mapped for the device
 * @page: page backing the receive buffer
 * @list: entry on one of the RX lists (rx_free/rx_used/rbd_allocated/rbd_empty)
 * @offset: byte offset of this buffer within @page (pages may be shared
 *	between buffers — see alloc_page/alloc_page_used in iwl_trans_pcie)
 * @vid: index ("virtual ID") used to find this buffer in the global table
 * @invalid: true when the buffer is in driver ownership, i.e. not currently
 *	assigned to the hardware
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
	u32 offset;
	u16 vid;
	bool invalid;
};
59
60
61
62
63
/**
 * struct isr_statistics - interrupt statistics (debug accounting)
 * @hw: number of HW error interrupts
 * @sw: number of SW (uCode) error interrupts
 * @err_code: most recent error code reported (presumably from FW — confirm)
 * @sch: number of scheduler interrupts
 * @alive: number of ALIVE interrupts
 * @rfkill: number of RF-kill interrupts
 * @ctkill: number of critical-temperature (CT-kill) interrupts
 * @wakeup: number of wakeup interrupts
 * @rx: number of RX interrupts
 * @tx: number of TX interrupts
 * @unhandled: number of interrupts with no handled cause
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
77
78
79
80
81
82
83
/**
 * struct iwl_rx_transfer_desc - transfer descriptor (device DMA layout)
 * @rbid: unique tag of the receive buffer
 * @reserved: reserved, must stay zero
 * @addr: 64-bit DMA address of the buffer
 *
 * NOTE(review): this is a hardware-defined layout (__packed, __le fields);
 * field order and sizes must not change.
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;
89
90#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)
91
92
93
94
95
96
97
98
/**
 * struct iwl_rx_completion_desc - completion descriptor (device DMA layout)
 * @reserved1: reserved
 * @rbid: unique tag of the completed receive buffer
 * @flags: flags; bit 0 = fragmented (see IWL_RX_CD_FLAGS_FRAGMENTED),
 *	all other bits reserved
 * @reserved2: reserved, pads the descriptor to 32 bytes
 *
 * NOTE(review): hardware-defined layout (__packed, __le fields); do not
 * reorder or resize fields.
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
/**
 * struct iwl_rxq - RX queue
 * @id: queue index
 * @bd: driver's pointer to the buffer of receive buffer descriptors (RBDs)
 * @bd_dma: bus address of the RBD buffer
 * @used_bd: driver's pointer to the buffer of used RBDs; interpreted as
 *	32-bit entries (@bd_32) or completion descriptors (@cd) depending on
 *	device family — the three union members alias the same memory
 * @used_bd_dma: bus address of the used-RBD buffer
 * @read: index of the next RBD to read (shared with device)
 * @write: index of the next RBD to write (shared with device)
 * @free_count: number of pre-allocated buffers on @rx_free
 * @used_count: number of RBDs handed over for (re)allocation
 * @write_actual: last write index actually written to the device
 * @queue_size: number of entries in this queue
 * @rx_free: list of RBDs with a page attached, ready for use
 * @rx_used: list of RBDs with no page attached
 * @need_update: the device's write pointer needs updating
 * @next_rb_is_fragment: the previous RB had the fragmented flag set, so the
 *	next one continues the same fragment chain (see
 *	IWL_RX_CD_FLAGS_FRAGMENTED)
 * @rb_stts: driver's pointer to the receive-buffer status area
 * @rb_stts_dma: bus address of the receive-buffer status area
 * @lock: protects the queue's lists and indices
 * @napi: NAPI context for this queue
 * @queue: the actual RX queue array of buffer pointers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
157
158
159
160
161
162
163
164
165
166
167
168
169
/**
 * struct iwl_rb_allocator - background allocator of RX buffer pages
 * @req_pending: number of allocation requests not yet processed
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated, ready to hand to a queue
 * @rbd_empty: RBDs with no page attached, awaiting allocation
 * @lock: protects the @rbd_allocated and @rbd_empty lists
 * @alloc_wq: workqueue on which @rx_alloc runs
 * @rx_alloc: work item doing the background page allocation
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
179
180
181
182
183
184static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
185 struct iwl_rxq *rxq)
186{
187 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
188 __le16 *rb_stts = rxq->rb_stts;
189
190 return READ_ONCE(*rb_stts);
191 } else {
192 struct iwl_rb_status *rb_stts = rxq->rb_stts;
193
194 return READ_ONCE(rb_stts->closed_rb_num);
195 }
196}
197
198#ifdef CONFIG_IWLWIFI_DEBUGFS
199
200
201
202
203
204
205
206
207
/**
 * enum iwl_fw_mon_dbgfs_state - state of the monitor_data debugfs file
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is not currently open
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file cannot be used (presumably the
 *	monitor is owned by another debug mechanism — confirm with users of
 *	this enum)
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
213#endif
214
215
216
217
218
219
/**
 * enum iwl_shared_irq_flags - what a shared MSI-X vector serves
 * @IWL_SHARED_IRQ_NON_RX: the vector serves non-RX causes
 * @IWL_SHARED_IRQ_FIRST_RSS: the vector serves the first RSS queue
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX = BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};
224
225
226
227
228
229
230
/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default (initial) value
 * @IWL_IMAGE_RESP_SUCCESS: image loading succeeded
 * @IWL_IMAGE_RESP_FAIL: image loading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF = 0,
	IWL_IMAGE_RESP_SUCCESS = 1,
	IWL_IMAGE_RESP_FAIL = 2,
};
236
237
238
239
240
241
242
243
244
245
246
247#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * struct cont_rec - continuous-recording debugfs data
 * @prev_wr_ptr: previous write pointer, to detect new data
 * @prev_wrap_cnt: previous wrap count, to detect buffer wrap-around
 * @state: file state, an enum iwl_fw_mon_dbgfs_state value
 * @mutex: serializes access, since several debugfs readers may race
 */
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
255#endif
256
/**
 * enum iwl_pcie_fw_reset_state - FW reset handshake state
 * @FW_RESET_IDLE: no reset handshake in progress
 * @FW_RESET_REQUESTED: a reset was requested, waiting for the FW response
 * @FW_RESET_OK: the FW acknowledged the reset
 * @FW_RESET_ERROR: the reset handshake failed
 */
enum iwl_pcie_fw_reset_state {
	FW_RESET_IDLE,
	FW_RESET_REQUESTED,
	FW_RESET_OK,
	FW_RESET_ERROR,
};
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices (union with
 *	@ctxt_info — only one is valid depending on device family)
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @iml: image loader image virtual address
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: image loader image DMA address
 * @trans: pointer to the generic transport area
 * @napi_dev: dummy netdev needed only to bind NAPI contexts
 * @ict_tbl: virtual address of the ICT (interrupt cause) table
 * @ict_tbl_dma: bus address of the ICT table
 * @ict_index: current read index into the ICT table
 * @use_ict: whether the ICT mechanism is in use
 * @is_down: transport is down (stop_device was called)
 * @opmode_down: op-mode brought the transport down
 * @debug_rfkill: debug override for rfkill state (-1 = no override;
 *	see iwl_is_rfkill_set())
 * @isr_stats: interrupt statistics
 * @irq_lock: protects interrupt mask/enable state
 * @mutex: serializes start/stop flows (held where
 *	lockdep_assert_held(&trans_pcie->mutex) appears)
 * @inta_mask: interrupt mask written to CSR_INT_MASK (non-MSI-X mode)
 * @scd_base_addr: scheduler SRAM base address
 * @kw: keep-warm DMA buffer
 * @pnvm_dram: DRAM area that contains the PNVM data
 * @reduce_power_dram: DRAM area for the reduce-power data
 * @txq_memory: TX queue memory
 * @pci_dev: the PCI device
 * @hw_base: mapped PCI hardware (BAR) base address
 * @ucode_write_complete: the uCode section copy completed
 * @sx_complete: suspend/resume (Sx) transition completed
 * @ucode_write_waitq: wait queue for uCode load completion
 * @sx_waitq: wait queue for Sx completion
 * @def_rx_queue: default RX queue number
 * @n_no_reclaim_cmds: number of entries used in @no_reclaim_cmds
 * @no_reclaim_cmds: host commands that generate no reply from FW
 * @num_rx_bufs: number of RX buffers per queue
 * @rx_buf_size: RX buffer (A-MSDU) size setting
 * @scd_set_active: whether to configure the SCD as active for HCMD queue
 * @pcie_dbg_dumped_once: registers were already dumped once after an error
 * @rx_page_order: page allocation order for RX buffers
 * @rx_buf_bytes: RX buffer size in bytes
 * @supported_dma_mask: DMA mask used to validate RX buffer addresses
 * @alloc_page_lock: protects @alloc_page/@alloc_page_used
 * @alloc_page: partially-used page kept for further RX buffer carving
 * @alloc_page_used: bytes of @alloc_page already handed out
 * @reg_lock: protects direct HW register access
 * @cmd_hold_nic_awake: a host command is holding the NIC awake
 * @fw_mon_data: continuous-recording debugfs data
 * @msix_entries: MSI-X vector entries
 * @msix_enabled: MSI-X was successfully enabled
 * @shared_vec_mask: causes served by the shared vector
 *	(enum iwl_shared_irq_flags)
 * @alloc_vecs: number of interrupt vectors allocated by the OS
 * @def_irq: default IRQ for non-RX causes
 * @fh_init_mask: initial unmasked FH causes
 * @hw_init_mask: initial unmasked HW causes
 * @fh_mask: currently unmasked FH causes
 * @hw_mask: currently unmasked HW causes
 * @affinity_mask: per-vector CPU affinity masks
 * @tx_cmd_queue_size: size of the command TX queue
 * @in_rescan: a device rescan was triggered
 * @base_rb_stts: base virtual address of RB status for all queues
 * @base_rb_stts_dma: base bus address of RB status for all queues
 * @fw_reset_handshake: the FW reset handshake is supported/enabled
 * @fw_reset_state: state of the FW reset handshake
 * @fw_reset_waitq: wait queue for the FW reset handshake
 * @rf_name: name/version of the RF module, if any
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	void *iml;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr kw;

	struct iwl_dram_data pnvm_dram;
	struct iwl_dram_data reduce_power_dram;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t sx_waitq;

	u8 def_rx_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;

	bool fw_reset_handshake;
	enum iwl_pcie_fw_reset_state fw_reset_state;
	wait_queue_head_t fw_reset_waitq;

	char rf_name[32];
};
420
421static inline struct iwl_trans_pcie *
422IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
423{
424 return (void *)trans->trans_specific;
425}
426
static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * unmasked by writing 1 to the bit in the automask register in order
	 * to re-enable the interrupt.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}
439
/*
 * Inverse of IWL_TRANS_GET_PCIE_TRANS(): recover the enclosing transport
 * from its trans_specific area via container_of().
 */
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
446
447
448
449
450
451struct iwl_trans
452*iwl_trans_pcie_alloc(struct pci_dev *pdev,
453 const struct pci_device_id *ent,
454 const struct iwl_cfg_trans_params *cfg_trans);
455void iwl_trans_pcie_free(struct iwl_trans *trans);
456
457bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
458#define _iwl_trans_pcie_grab_nic_access(trans) \
459 __cond_lock(nic_access_nobh, \
460 likely(__iwl_trans_pcie_grab_nic_access(trans)))
461
462
463
464
465int iwl_pcie_rx_init(struct iwl_trans *trans);
466int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
467irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
468irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
469irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
470irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
471int iwl_pcie_rx_stop(struct iwl_trans *trans);
472void iwl_pcie_rx_free(struct iwl_trans *trans);
473void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
474void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
475void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
476 struct iwl_rxq *rxq);
477
478
479
480
481irqreturn_t iwl_pcie_isr(int irq, void *data);
482int iwl_pcie_alloc_ict(struct iwl_trans *trans);
483void iwl_pcie_free_ict(struct iwl_trans *trans);
484void iwl_pcie_reset_ict(struct iwl_trans *trans);
485void iwl_pcie_disable_ict(struct iwl_trans *trans);
486
487
488
489
490int iwl_pcie_tx_init(struct iwl_trans *trans);
491void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
492int iwl_pcie_tx_stop(struct iwl_trans *trans);
493void iwl_pcie_tx_free(struct iwl_trans *trans);
494bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
495 const struct iwl_trans_txq_scd_cfg *cfg,
496 unsigned int wdg_timeout);
497void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
498 bool configure_scd);
499void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
500 bool shared_mode);
501int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
502 struct iwl_device_tx_cmd *dev_cmd, int txq_id);
503void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
504int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
505void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
506 struct iwl_rx_cmd_buffer *rxb);
507void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
508
509
510
511
512void iwl_pcie_dump_csr(struct iwl_trans *trans);
513
514
515
516
/*
 * Disable all interrupts. Caller must hold trans_pcie->irq_lock
 * (see iwl_disable_interrupts() for the locked wrapper).
 */
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupt we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
539
540static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
541 int start)
542{
543 int i = 0;
544
545 while (start < fw->num_sec &&
546 fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
547 fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
548 start++;
549 i++;
550 }
551
552 return i;
553}
554
/*
 * Free the DRAM blocks that were allocated for the FW image during
 * context-info (self-init) load, then release the tracking array.
 */
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		/* a non-zero count with no array would indicate a bug */
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}
573
/* Disable all interrupts, taking irq_lock around the unlocked helper. */
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}
582
/*
 * Enable all interrupts. Caller must hold trans_pcie->irq_lock
 * (see iwl_enable_interrupts() for the locked wrapper).
 */
static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep track of the unmasked causes; in MSI-X
		 * mode a cause is enabled when its bit in the mask register
		 * is CLEAR, hence the ~mask writes below.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}
605
/* Enable all interrupts, taking irq_lock around the unlocked helper. */
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}
/*
 * Unmask exactly the HW causes in @msk (MSI-X mode: a cause is enabled
 * when its mask bit is clear, hence ~msk) and cache the unmasked set.
 */
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}
621
/*
 * Unmask exactly the FH causes in @msk (MSI-X mode: a cause is enabled
 * when its mask bit is clear, hence ~msk) and cache the unmasked set.
 */
static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
629
/* Enable only the interrupt needed to track FW section load (D2S DMA). */
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/* mask all HW causes, keep only the D2S channel-0 FH cause */
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}
645
/* Enable the interrupts needed to receive the ALIVE notification when
 * the FW is loaded through the context-info (self-init) mechanism.
 */
static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we'll receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}
672
673static inline const char *queue_name(struct device *dev,
674 struct iwl_trans_pcie *trans_p, int i)
675{
676 if (trans_p->shared_vec_mask) {
677 int vec = trans_p->shared_vec_mask &
678 IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
679
680 if (i == 0)
681 return DRV_NAME ":shared_IRQ";
682
683 return devm_kasprintf(dev, GFP_KERNEL,
684 DRV_NAME ":queue_%d", i + vec);
685 }
686 if (i == 0)
687 return DRV_NAME ":default_queue";
688
689 if (i == trans_p->alloc_vecs - 1)
690 return DRV_NAME ":exception";
691
692 return devm_kasprintf(dev, GFP_KERNEL,
693 DRV_NAME ":queue_%d", i);
694}
695
/* Enable only the RF-kill interrupt cause. */
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/* mask all FH causes, keep only the HW RF-kill cause */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}
721
722void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
723
724static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
725{
726 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
727
728 lockdep_assert_held(&trans_pcie->mutex);
729
730 if (trans_pcie->debug_rfkill == 1)
731 return true;
732
733 return !(iwl_read32(trans, CSR_GP_CNTRL) &
734 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
735}
736
737static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
738 u32 reg, u32 mask, u32 value)
739{
740 u32 v;
741
742#ifdef CONFIG_IWLWIFI_DEBUG
743 WARN_ON_ONCE(value & ~mask);
744#endif
745
746 v = iwl_read32(trans, reg);
747 v &= ~mask;
748 v |= value;
749 iwl_write32(trans, reg, v);
750}
751
/* Clear the bits in @mask of register @reg. */
static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}
757
/* Set the bits in @mask of register @reg. */
static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
763
764static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
765{
766 return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
767}
768
769void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
770void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
771
772#ifdef CONFIG_IWLWIFI_DEBUGFS
773void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
774#else
/* No-op stub when debugfs support is compiled out. */
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
776#endif
777
778void iwl_pcie_rx_allocator_work(struct work_struct *data);
779
780
781int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
782void iwl_pcie_apm_config(struct iwl_trans *trans);
783int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
784void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
785bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
786void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
787 bool was_in_rfkill);
788void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
789void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
790int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
791 struct iwl_dma_ptr *ptr, size_t size);
792void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
793void iwl_pcie_apply_destination(struct iwl_trans *trans);
794
795
796void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
797
798
799int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
800 const struct fw_img *fw, bool run_in_rfkill);
801void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
802int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
803 struct iwl_host_cmd *cmd);
804void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
805void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
806void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
807 bool test, bool reset);
808int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
809 struct iwl_host_cmd *cmd);
810int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
811 struct iwl_host_cmd *cmd);
812#endif
813