1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64#ifndef __iwl_trans_int_pcie_h__
65#define __iwl_trans_int_pcie_h__
66
67#include <linux/spinlock.h>
68#include <linux/interrupt.h>
69#include <linux/skbuff.h>
70#include <linux/wait.h>
71#include <linux/pci.h>
72#include <linux/timer.h>
73#include <linux/cpu.h>
74
75#include "iwl-fh.h"
76#include "iwl-csr.h"
77#include "iwl-trans.h"
78#include "iwl-debug.h"
79#include "iwl-io.h"
80#include "iwl-op-mode.h"
81#include "iwl-drv.h"
82
83
84
85
86
/* max DMA fragments for one TX buffer: 3 TBs are reserved for other uses
 * (first TB etc.) -- NOTE(review): exact reservation split not visible in
 * this header, confirm against the TX path */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)

/*
 * RX allocator tunables -- NOTE(review): semantics inferred from names and
 * from how the RB allocator is structured below; confirm against rx.c.
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

/* forward declaration, full definition lives in iwl-trans.h */
struct iwl_host_cmd;
99
100
101
102
103
104
105
106
107
108
109
110
111
/**
 * struct iwl_rx_mem_buffer - driver-side RX buffer descriptor
 * @page_dma: bus address of the mapped @page
 * @page: backing page for the receive buffer
 * @vid: buffer tag/index -- presumably used to match completions back to
 *	this buffer via the global table; confirm against the RX code
 * @invalid: buffer is not currently expected back from the hardware
 * @list: membership in the rx_free/rx_used/allocator lists
 * @offset: byte offset of this buffer within @page
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};
120
121
122
123
124
/* interrupt statistics: one counter per interrupt cause */
struct isr_statistics {
	u32 hw;		/* hardware error interrupts */
	u32 sw;		/* software (uCode) error interrupts */
	u32 err_code;	/* error code -- NOTE(review): last reported? confirm */
	u32 sch;	/* scheduler interrupts */
	u32 alive;	/* alive notifications */
	u32 rfkill;	/* RF-kill interrupts */
	u32 ctkill;	/* CT-kill (thermal) interrupts */
	u32 wakeup;	/* wakeup interrupts */
	u32 rx;		/* RX interrupts */
	u32 tx;		/* TX interrupts */
	u32 unhandled;	/* interrupts with no recognized cause */
};
138
139
140
141
142
143
144
/**
 * struct iwl_rx_transfer_desc - transfer descriptor (device-visible,
 *	hence __packed and little-endian fields)
 * @rbid: unique tag of the receive buffer
 * @reserved: reserved
 * @addr: DMA address of the receive buffer
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

/* completion-descriptor flag: the received frame is fragmented */
#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor (device-visible)
 * @reserved1: reserved
 * @rbid: unique tag of the completed receive buffer
 * @flags: IWL_RX_CD_FLAGS_* bits
 * @reserved2: reserved; pads the descriptor to 32 bytes
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
/**
 * struct iwl_rxq - RX queue
 * @id: queue index
 * @bd: driver's pointer to the buffer-descriptor ring
 * @bd_dma: bus address of @bd
 * @used_bd: "used" ring; its element layout depends on the device
 *	generation, hence the union (@bd_32 vs completion descriptors @cd)
 * @used_bd_dma: bus address of @used_bd
 * @tr_tail: driver's copy of the transfer-ring tail
 * @tr_tail_dma: bus address of @tr_tail
 * @cr_tail: driver's copy of the completion-ring tail
 * @cr_tail_dma: bus address of @cr_tail
 * @read: shadow ring read index
 * @write: shadow ring write index
 * @free_count: number of buffers currently on @rx_free
 * @used_count: number of buffers handled but not yet re-posted
 * @write_actual: last write index actually written to the device
 * @queue_size: size of this queue
 * @rx_free: RBDs with allocated pages, ready to hand to the device
 * @rx_used: RBDs waiting for a page allocation
 * @need_update: a device write-pointer update is pending
 * @rb_stts: driver's pointer to the receive-buffer status area
 *	(interpreted per device family, see iwl_get_closed_rb_stts())
 * @rb_stts_dma: bus address of @rb_stts
 * @lock: protects the queue state
 * @napi: NAPI context for this queue
 * @queue: RBD slots indexed by hardware position
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
224
225
226
227
228
229
230
231
232
233
234
235
236
/**
 * struct iwl_rb_allocator - background RX page allocator
 * @req_pending: number of posted allocation requests not yet fulfilled
 * @req_ready: number of fulfilled requests waiting to be claimed
 * @rbd_allocated: RBDs with attached pages, ready for the queues
 * @rbd_empty: RBDs waiting for pages
 * @lock: protects the two lists
 * @alloc_wq: workqueue that runs @rx_alloc
 * @rx_alloc: work item allocating pages for the empty RBDs
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

/* one coherent DMA allocation: CPU address, bus address and size */
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};
252
253
254
255
256
257static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
258{
259 return ++index &
260 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
261}
262
263
264
265
266
267static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
268 struct iwl_rxq *rxq)
269{
270 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
271 __le16 *rb_stts = rxq->rb_stts;
272
273 return READ_ONCE(*rb_stts);
274 } else {
275 struct iwl_rb_status *rb_stts = rxq->rb_stts;
276
277 return READ_ONCE(rb_stts->closed_rb_num);
278 }
279}
280
281
282
283
284
285static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
286{
287 return --index &
288 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
289}
290
/* per-command metadata kept alongside each entry in a TX queue */
struct iwl_cmd_meta {
	/* only for synchronous commands that want the reply back */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;	/* TBs in use -- NOTE(review): bitmap or count? confirm */
};

/*
 * The first TB of every TFD is served out of a dedicated, 64-byte
 * aligned buffer that holds the first IWL_FIRST_TB_SIZE bytes of the
 * command -- NOTE(review): rationale inferred from the structures
 * below, confirm against the TX path.
 */
#define IWL_FIRST_TB_SIZE 20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

/* one slot of a TX queue: the command or skb plus its metadata */
struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after the command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

/* backing storage for the first TB of one TX queue entry */
struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
/**
 * struct iwl_txq - Tx queue
 * @tfds: transmit frame descriptor ring
 * @first_tb_bufs: backing memory for the first TB of each entry
 * @first_tb_dma: bus address of @first_tb_bufs
 * @entries: per-slot command/skb/meta bookkeeping
 * @lock: protects the queue
 * @frozen_expiry_remainder: remaining watchdog time while @frozen
 * @stuck_timer: fires when the queue makes no progress for @wd_timeout
 * @trans_pcie: back-pointer to the transport
 * @need_update: a device write-pointer update is pending
 * @frozen: the stuck-queue watchdog is frozen
 * @ampdu: queue carries an aggregation session
 * @block: queue block state -- NOTE(review): int, likely a nesting
 *	counter; confirm against tx.c
 * @wd_timeout: watchdog timeout (jiffies)
 * @overflow_q: skbs queued while the ring was full
 * @bc_tbl: byte-count table for this queue (gen2)
 * @write_ptr: next slot to be used
 * @read_ptr: last used slot not yet freed
 * @dma_addr: physical address of the ring
 * @n_window: number of usable slots
 * @id: queue id
 * @low_mark: low free-slot watermark
 * @high_mark: high free-slot watermark
 * @overflow_tx: an overflow-queue transmission is in progress
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};
391
392static inline dma_addr_t
393iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
394{
395 return txq->first_tb_dma +
396 sizeof(struct iwl_pcie_first_tb_buf) * idx;
397}
398
/* per-CPU scratch page used to build TSO headers */
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;	/* next free byte within @page */
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 *	debugfs file
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled and can no
 *	longer be used
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non-RX causes too
 * @IWL_SHARED_IRQ_FIRST_RSS: the first RSS queue shares the first vector
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX = BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};

/**
 * enum iwl_image_response_code - values the image-response register can
 *	take (image loader / IML handshake)
 * @IWL_IMAGE_RESP_DEF: default (reset) value of the register
 * @IWL_IMAGE_RESP_SUCCESS: image was read successfully
 * @IWL_IMAGE_RESP_FAIL: reading the image failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF = 0,
	IWL_IMAGE_RESP_SUCCESS = 1,
	IWL_IMAGE_RESP_FAIL = 2,
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* continuous firmware-monitor recording state (debugfs monitor_data) */
struct cont_rec {
	u32 prev_wr_ptr;	/* write pointer seen on the previous read */
	u32 prev_wrap_cnt;	/* wrap counter seen on the previous read */
	u8 state;		/* one of enum iwl_fw_mon_dbgfs_state */
	/* serializes concurrent debugfs access to the fields above */
	struct mutex mutex;
};
#endif
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
/*
 * struct iwl_trans_pcie - PCIe transport private data
 *
 * Lives in the trans_specific area of struct iwl_trans; retrieve it with
 * IWL_TRANS_GET_PCIE_TRANS() and go back with iwl_trans_pcie_get_trans().
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;			/* all RX queues */
	struct iwl_rx_mem_buffer *rx_pool;	/* shared pool of RX buffers */
	struct iwl_rx_mem_buffer **global_table; /* vid -> RBD lookup table */
	struct iwl_rb_allocator rba;		/* background page allocator */
	union {		/* context info format depends on device generation */
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;	/* periphery info page (gen3) */
	struct iwl_prph_scratch *prph_scratch;	/* periphery scratch (gen3) */
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;		/* image loader DMA address */
	struct iwl_trans *trans;		/* back-pointer */

	/* dummy net_device so NAPI can be used */
	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* ICT (interrupt cause table) state */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;	/* debugfs rfkill override; see iwl_is_rfkill_set() */
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;	/* guards interrupt enable/disable sequences */
	struct mutex mutex;	/* guards start/stop/rfkill state */
	u32 inta_mask;		/* current INTA interrupt mask (non-MSI-X) */
	u32 scd_base_addr;	/* scheduler SRAM base address */
	struct iwl_dma_ptr scd_bc_tbls;	/* scheduler byte-count tables */
	struct iwl_dma_ptr kw;	/* NOTE(review): keep-warm page? confirm */

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus access */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;	/* firmware chunk DMA has finished */
	bool sx_complete;		/* suspend/resume handshake done */
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t sx_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;			/* command queue index */
	u8 def_rx_queue;		/* default RX queue index */
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;			/* max TBs per TFD */
	u16 tfd_size;			/* TFD size in bytes */
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;		/* page order for RX buffer pages */
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* protects the two alloc_page fields below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;	/* partially used allocation page */
	u32 alloc_page_used;

	/* protects register access and cmd_hold_nic_awake */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;	/* continuous monitor recording */
#endif

	/* MSI-X state */
	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;		/* enum iwl_shared_irq_flags bits */
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;		/* FH causes enabled at init */
	u32 hw_init_mask;		/* HW causes enabled at init */
	u32 fh_mask;			/* currently unmasked FH causes */
	u32 hw_mask;			/* currently unmasked HW causes */
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	/* one allocation backing every rxq's rb_stts area */
	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};
625
626static inline struct iwl_trans_pcie *
627IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
628{
629 return (void *)trans->trans_specific;
630}
631
/*
 * Re-enable a handled MSI-X vector.
 * NOTE(review): the hardware auto-masks a vector when it fires; writing
 * the vector's bit to the auto-mask-status register re-enables it --
 * reconstructed rationale, confirm against the CSR documentation.
 */
static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

/*
 * Inverse of IWL_TRANS_GET_PCIE_TRANS(): recover the iwl_trans that
 * embeds this PCIe-private structure.
 */
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
652
/*****************************************************
 * Transport layer: allocation / free
 *****************************************************/
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
 * RX
 *****************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
 * ICT - interrupt handling
 *****************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);
697static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
698{
699 return upper_32_bits(phys) != upper_32_bits(phys + len);
700}
701
/*****************************************************
 * TX / HCMD
 *****************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
729
/*
 * Return the byte length of TB @idx within TFD @_tfd, handling both the
 * extended TFH format and the legacy TFD format (where the length is
 * packed into the upper 12 bits of hi_n_len).
 */
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		/* legacy layout: length lives above the address high bits */
		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
745
/*****************************************************
 * Error handling
 *****************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
 * Helpers
 *****************************************************/
753
/* mask all interrupt sources; the caller must hold irq_lock */
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or the flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* mask all the causes we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

/* gen3 ring counts -- NOTE(review): values from HW spec, confirm */
#define IWL_NUM_OF_COMPLETION_RINGS 31
#define IWL_NUM_OF_TRANSFER_RINGS 527
779
780static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
781 int start)
782{
783 int i = 0;
784
785 while (start < fw->num_sec &&
786 fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
787 fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
788 start++;
789 i++;
790 }
791
792 return i;
793}
794
795static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
796 const struct fw_desc *sec,
797 struct iwl_dram_data *dram)
798{
799 dram->block = dma_alloc_coherent(trans->dev, sec->len,
800 &dram->physical,
801 GFP_KERNEL);
802 if (!dram->block)
803 return -ENOMEM;
804
805 dram->size = sec->len;
806 memcpy(dram->block, sec->data, sec->len);
807
808 return 0;
809}
810
811static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
812{
813 struct iwl_self_init_dram *dram = &trans->init_dram;
814 int i;
815
816 if (!dram->fw) {
817 WARN_ON(dram->fw_cnt);
818 return;
819 }
820
821 for (i = 0; i < dram->fw_cnt; i++)
822 dma_free_coherent(trans->dev, dram->fw[i].size,
823 dram->fw[i].block, dram->fw[i].physical);
824
825 kfree(dram->fw);
826 dram->fw_cnt = 0;
827 dram->fw = NULL;
828}
829
/* locked wrapper around _iwl_disable_interrupts() */
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* unmask all interrupt sources; the caller must hold irq_lock */
static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask track the currently unmasked causes; in the
		 * MSI-X mask registers a cause is ENABLED when its bit is
		 * CLEAR, hence the ~ on write.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

/* locked wrapper around _iwl_enable_interrupts() */
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* unmask exactly the HW causes in @msk (MSI-X mode only) */
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

/* unmask exactly the FH causes in @msk (MSI-X mode only) */
static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

/* enable only the interrupt needed for the firmware-load DMA handshake */
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/* mask all HW causes, keep only the D2S channel-0 FH cause */
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

/* enable only what is needed to receive the ALIVE notification when the
 * firmware is loaded via context info */
static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * ALIVE arrives as an interrupt, but the notification
		 * itself comes in over RX, so FH_RX is enabled alongside
		 * the ALIVE bit -- NOTE(review): rationale reconstructed,
		 * confirm against the ISR flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * All FH causes stay enabled so the ALIVE notification
		 * can be received over RX.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}
928
/* map a ring index into the command window; the masking implies
 * n_window is a power of two -- NOTE(review): confirm at queue setup */
static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
933
/*
 * Return a pointer to TFD @idx of @txq.  On TFH devices the index is
 * first folded into the command window.
 */
static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->trans_cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	/* tfd_size depends on the descriptor format of this device */
	return txq->tfds + trans_pcie->tfd_size * idx;
}
944
945static inline const char *queue_name(struct device *dev,
946 struct iwl_trans_pcie *trans_p, int i)
947{
948 if (trans_p->shared_vec_mask) {
949 int vec = trans_p->shared_vec_mask &
950 IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
951
952 if (i == 0)
953 return DRV_NAME ": shared IRQ";
954
955 return devm_kasprintf(dev, GFP_KERNEL,
956 DRV_NAME ": queue %d", i + vec);
957 }
958 if (i == 0)
959 return DRV_NAME ": default queue";
960
961 if (i == trans_p->alloc_vecs - 1)
962 return DRV_NAME ": exception";
963
964 return devm_kasprintf(dev, GFP_KERNEL,
965 DRV_NAME ": queue %d", i);
966}
967
/* enable only the RF-kill interrupt cause */
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/* mask all FH causes, keep only the HW rfkill cause */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * allow the rfkill interrupt to wake the device from L1
		 * on 9000-series and later -- NOTE(review): rationale
		 * reconstructed from the flag name, confirm against the
		 * original commit
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
995
996static inline void iwl_wake_queue(struct iwl_trans *trans,
997 struct iwl_txq *txq)
998{
999 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1000
1001 if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
1002 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
1003 iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
1004 }
1005}
1006
1007static inline void iwl_stop_queue(struct iwl_trans *trans,
1008 struct iwl_txq *txq)
1009{
1010 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1011
1012 if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
1013 iwl_op_mode_queue_full(trans->op_mode, txq->id);
1014 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
1015 } else
1016 IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
1017 txq->id);
1018}
1019
1020static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
1021{
1022 int index = iwl_pcie_get_cmd_index(q, i);
1023 int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
1024 int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
1025
1026 return w >= r ?
1027 (index >= r && index < w) :
1028 !(index < r && index >= w);
1029}
1030
/*
 * Return true if RF-kill is asserted, either via the debugfs override
 * (debug_rfkill == 1) or by the hardware GP_CNTRL bit (which reads as 0
 * when the switch is on).  The caller must hold trans_pcie->mutex.
 */
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
1043
1044static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
1045 u32 reg, u32 mask, u32 value)
1046{
1047 u32 v;
1048
1049#ifdef CONFIG_IWLWIFI_DEBUG
1050 WARN_ON_ONCE(value & ~mask);
1051#endif
1052
1053 v = iwl_read32(trans, reg);
1054 v &= ~mask;
1055 v |= value;
1056 iwl_write32(trans, reg, v);
1057}
1058
/* clear @mask bits in register @reg (read-modify-write) */
static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

/* set @mask bits in register @reg (read-modify-write) */
static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

/* true if any firmware debug destination (TLV or ini) is configured */
static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
1075
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
/* no-op stub when debugfs support is compiled out */
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */
1147