/* Internal declarations for the iwlwifi PCIe transport layer. */
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @vid: index of this rxb in the global table
 * @invalid: rxb is in driver ownership - not owned by HW
 * @list: list entry for the membuffer
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 * Counters for the different interrupt causes handled by the ISR.
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 * @addr: location of the buffer
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for reuse
 * @write_actual: last write index actually written to the device
 * @queue_size: size of this queue
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue state
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator is waiting for
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (to get the queue size from)
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
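
/* This relies on max_tfd_queue_size being a power of two: with a 256-entry
 * queue, incrementing index 255 yields 256 & 255 == 0, wrapping the index
 * back to the start of the ring.
 */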

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport (to get the device family from)
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (to get the queue size from)
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
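
/* ALIGN(20, 64) rounds up to the next multiple of 64, so each first-TB
 * buffer occupies one 64-byte, cacheline-friendly slot.
 */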

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (TFDs)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer context)
 * @need_update: indicates need to update read/write index
 * @frozen: tx stuck queue timer is frozen
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @block: queue is blocked
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @overflow_q: overflow queue for frames that didn't fit on the HW queue
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BDs
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 * @overflow_tx: need to transmit from overflow
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit
 * frame descriptors) and the required locking structures.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
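
/* Each queue entry owns one fixed-size first-TB buffer, so the DMA address
 * of entry @idx is a plain array offset from first_tb_dma (e.g. idx 2 sits
 * at first_tb_dma + 2 * IWL_FIRST_TB_SIZE_ALIGN).
 */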

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: image loader image DMA address
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_hold_nic_awake: indicates NIC is held awake for command processing
 * @fw_mon_data: fw continuous recording data
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 def_rx_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should
	 * be re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that it's cleared by writing 1 to
	 * the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
int iwl_pcie_rx_alloc(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
				   int num_tbs);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}
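
/* In the legacy TFD layout, hi_n_len packs the upper 4 bits of the DMA
 * address in its low nibble and the 12-bit buffer length above it, hence
 * the >> 4 to extract the length.
 */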

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupt we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
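
/* Example: for a fw_img laid out as { sec0, sec1, CPU1_CPU2_SEPARATOR_SECTION,
 * sec3 }, iwl_pcie_get_num_sections(fw, 0) returns 2 and
 * iwl_pcie_get_num_sections(fw, 3) returns 1.
 */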

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}
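
/* The section is copied into coherent DMA memory so the device can fetch it
 * during self-init; the caller owns dram->block and eventually releases it
 * with dma_free_coherent(), as iwl_pcie_ctxt_info_free_fw_img() does below.
 */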

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike msi, in msix cause is enabled when it is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
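
/* The MSI-X mask registers use "1 = masked" semantics, so enabling a set of
 * causes means writing the complement of @msk; fh_mask/hw_mask track the
 * causes that are currently unmasked.
 */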

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we'll receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
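
/* n_window is a power of two, so the mask maps any ring index onto a command
 * slot; e.g. with n_window == 32, index 40 maps to slot 8.
 */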

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = (trans_p->shared_vec_mask &
			   IWL_SHARED_IRQ_FIRST_RSS) ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
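
/* A slot counts as used when it falls in the half-open interval
 * [read, write) of the ring. For example, with r == 60, w == 4 and a
 * 64-entry window, slots 60..63 and 0..3 are used; the second branch
 * handles that wrap-around case.
 */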

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}
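
/* Non-atomic read-modify-write of a CSR: the @mask bits are cleared and
 * @value is set within them. The helper itself takes no lock, so callers
 * that can race on the same register must serialize externally.
 */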

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || trans->dbg.ini_valid);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */