/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "queue/tx.h"

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512
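/*
 * Rough meaning of these thresholds (a non-normative sketch of the Rx
 * allocator scheme, not dictated by this header): used RBDs are handed
 * to the background allocator in batches of RX_CLAIM_REQ_ALLOC, a
 * request is signalled once RX_POST_REQ_ALLOC buffers of a batch have
 * been released, and the Rx path switches to emergency in-place
 * allocation when the number of pending RBDs reaches
 * RX_PENDING_WATERMARK.
 */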

struct iwl_host_cmd;

/**
 * struct iwl_rx_mem_buffer - driver-side Rx buffer descriptor
 * @page_dma: bus address of the rxb page
 * @page: driver's pointer to the rxb page
 * @vid: index of this rxb in the global table
 * @invalid: rxb is in driver ownership - not owned by HW
 * @list: list entry for the rx_free/rx_used lists
 * @offset: byte offset into the page this buffer uses (when multiple
 *	RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 * @addr: ptr to free buffer start address
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors
 * @bd_32: 32-bit view of @used_bd (pre-AX210 multi-queue devices)
 * @cd: completion-descriptor view of @used_bd (AX210 and later)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: shared index to newest available Rx buffer
 * @write: shared index to oldest written Rx packet
 * @free_count: number of pre-allocated buffers in rx_free
 * @used_count: number of RBDs handed to the allocator for allocation
 * @write_actual: actual write pointer written to the device
 * @queue_size: size of this queue
 * @rx_free: list of RBDs with an allocated RB, ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @next_rb_is_fragment: the previous RB we handled had the fragmented
 *	flag set, so the next one is still another fragment
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue
 * @napi: NAPI context for this queue
 * @queue: actual rx queue; not used for multi-rx queue
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator is waiting for
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue; a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached, for allocator use; a list of
 *	&struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

/*
 * iwl_get_closed_rb_stts - get the closed RB number from the status
 * area, which is laid out differently depending on the device family
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}
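/*
 * Usage sketch (illustrative, mirroring what an Rx handler might do;
 * the 0x0FFF mask reflects the 12-bit closed-RB counter and is an
 * assumption here, not something this header defines):
 *	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 */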

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 *	debugfs file
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled; once this
 *	state is set the file can no longer be used
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: IML was read successfully
 * @IWL_IMAGE_RESP_FAIL: IML reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * struct cont_rec - continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described in
 *	&enum iwl_fw_mon_dbgfs_state
 * @mutex: locked while reading from monitor_data debugfs file
 */
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: image loader image DMA address
 * @trans: pointer to the generic transport area
 * @napi_dev: (fake) netdev for NAPI registration
 * @ict_tbl: virtual address of the ICT table
 * @ict_tbl_dma: bus address of the ICT table
 * @ict_index: current ICT read index
 * @use_ict: whether or not ICT is in use
 * @is_down: indicates the NIC is down
 * @opmode_down: indicates the op mode is down
 * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
 *	enabled/disabled
 * @isr_stats: interrupt statistics
 * @irq_lock: protects interrupt handling state
 * @mutex: to protect stop_device / start_fw / start_hw
 * @inta_mask: interrupt (INT-A) mask
 * @scd_base_addr: scheduler sram base address in SRAM
 * @kw: keep warm address
 * @pnvm_dram: DRAM area that contains the PNVM data
 * @txq_memory: TXQ allocation array
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied
 * @ucode_write_waitq: wait queue for uCode load
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 * @reg_lock: protect hw register access
 * @cmd_hold_nic_awake: whether the NIC is held awake for a command in flight
 * @fw_mon_data: fw continuous recording data
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags)
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @fw_reset_handshake: indicates FW reset handshake is needed
 * @fw_reset_done: indicates the FW reset handshake is complete
 * @fw_reset_waitq: wait queue for the FW reset handshake
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr kw;

	struct iwl_dram_data pnvm_dram;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t sx_waitq;

	u8 def_rx_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;

	bool fw_reset_handshake;
	bool fw_reset_done;
	wait_queue_head_t fw_reset_waitq;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupt we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
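/*
 * Worked example (hypothetical section layout, for illustration only):
 * if fw->sec[] holds { code, data, CPU1_CPU2_SEPARATOR_SECTION, code },
 * then iwl_pcie_get_num_sections(fw, 0) == 2 (stops at the separator)
 * and iwl_pcie_get_num_sections(fw, 3) == 1 (the rest of the image).
 */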

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
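/*
 * Note the inversion above: in MSI-X mode a cause is *enabled* when its
 * mask bit is clear, so both helpers write ~msk to the mask register
 * while caching the unmasked causes in fh_mask/hw_mask.
 */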

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we'll receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}
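/*
 * The dynamically built names above use devm_kasprintf(), so they are
 * device-managed: they remain valid for the lifetime of the struct
 * device and are freed automatically on driver detach, which is why
 * callers never free them explicitly.
 */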

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set it to
		 * allow the RF-kill interrupt to wake the PCIe bus.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
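/*
 * Usage sketch (register and flag chosen for illustration only):
 *	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *				 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 * performs a read-modify-write that ORs the mask into the register,
 * while __iwl_trans_pcie_clear_bit() clears exactly those bits.
 */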

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */