1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31#ifndef __iwl_trans_int_pcie_h__
32#define __iwl_trans_int_pcie_h__
33
34#include <linux/spinlock.h>
35#include <linux/interrupt.h>
36#include <linux/skbuff.h>
37#include <linux/wait.h>
38#include <linux/pci.h>
39#include <linux/timer.h>
40
41#include "iwl-fh.h"
42#include "iwl-csr.h"
43#include "iwl-trans.h"
44#include "iwl-debug.h"
45#include "iwl-io.h"
46#include "iwl-op-mode.h"
47
48
49
50
51
/*
 * Max number of skb fragments that fit in one TFD; three TBs are reserved
 * for other uses (command header / scratch data — TODO confirm exact split
 * against the TX path).
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)

/*
 * RX sizing: number of default RX queues, how many RBDs a single allocator
 * request posts/claims, and the pending-RBD watermark at which the
 * allocator is kicked.
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16

struct iwl_host_cmd;
63
64
65
66
67
68
69
70
71
72
/**
 * struct iwl_rx_mem_buffer - one driver-side RX buffer
 * @page_dma: bus address of the mapped @page
 * @page: page backing the receive buffer
 * @vid: index identifying this buffer in the transport's global_table
 *	(presumably assigned by the RX setup code — confirm against users)
 * @list: linkage into an rxq/allocator free or used list
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	struct list_head list;
};
79
80
81
82
83
/**
 * struct isr_statistics - interrupt counters kept for debugging
 *
 * One counter per interrupt cause; the fields are bumped by the interrupt
 * handlers. Field names mirror the cause they count (hw/sw error, scheduler,
 * alive, rfkill, ctkill, wakeup, rx, tx, unhandled); @err_code holds the
 * last reported error code rather than a count — confirm against the ISR
 * implementation.
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
/**
 * struct iwl_rxq - RX queue
 * @id: queue index
 * @bd: driver's pointer to the receive-buffer-descriptor (RBD) ring
 * @bd_dma: bus address of the RBD ring
 * @used_bd: driver's pointer to the used-RBD ring (presumably only on
 *	multi-queue devices — confirm; NULL otherwise)
 * @used_bd_dma: bus address of @used_bd
 * @read: shadow of the queue read index
 * @write: shadow of the queue write index
 * @free_count: number of RBDs currently on @rx_free
 * @used_count: number of RBDs currently on @rx_used
 * @write_actual: last write index actually written to the device
 * @queue_size: number of RBDs in this queue
 * @rx_free: RBDs with pages attached and DMA-mapped, ready for the device
 * @rx_used: RBDs that still need a page attached
 * @need_update: the device write pointer needs to be updated
 * @rb_stts: driver's pointer to the receive-buffer status area
 * @rb_stts_dma: bus address of @rb_stts
 * @lock: protects this queue's state
 * @napi: NAPI context used to poll this queue
 * @queue: per-slot back-pointers into the RX buffer pool
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
142
143
144
145
146
147
148
149
150
151
152
153
154
/**
 * struct iwl_rb_allocator - background allocator of RX buffer pages
 * @req_pending: number of allocation requests not yet served
 * @req_ready: number of served requests waiting to be claimed
 * @rbd_allocated: RBDs with pages attached, ready to be claimed
 * @rbd_empty: RBDs waiting for pages
 * @lock: protects the two lists
 * @alloc_wq: workqueue on which @rx_alloc runs
 * @rx_alloc: work item that attaches pages to empty RBDs
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
164
/**
 * struct iwl_dma_ptr - handle for a DMA-coherent allocation
 * @dma: bus address of the buffer
 * @addr: CPU (virtual) address of the buffer
 * @size: size of the buffer in bytes
 */
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};
170
171
172
173
174
175static inline int iwl_queue_inc_wrap(int index)
176{
177 return ++index & (TFD_QUEUE_SIZE_MAX - 1);
178}
179
180
181
182
183
184static inline int iwl_queue_dec_wrap(int index)
185{
186 return --index & (TFD_QUEUE_SIZE_MAX - 1);
187}
188
/**
 * struct iwl_cmd_meta - per-slot command bookkeeping
 * @source: host command this slot was built from (presumably NULL for
 *	plain TX frames — confirm against the TX path)
 * @flags: CMD_* flags associated with this entry
 */
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
/**
 * struct iwl_queue - generic ring-index bookkeeping for a TX queue
 * @write_ptr: index of the first empty entry (next slot to write)
 * @read_ptr: index of the oldest used entry (next slot to reclaim)
 * @dma_addr: bus address of the buffer descriptors
 * @n_window: usable window of the queue (may be smaller than the ring)
 * @id: queue id
 * @low_mark: resume the queue when free space rises above this
 * @high_mark: stop the queue when free space drops below this
 */
struct iwl_queue {
	int write_ptr;
	int read_ptr;
	/* physical address of the buffer descriptors */
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	/* low watermark: resume queue when free space > this */
	int high_mark;
	/* high watermark: stop queue when free space < this */
};
228
/* Number of entries in a data TX queue and in the host command queue */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * Size of the per-slot scratch buffer: the first bytes of each command
 * (header plus the start of the payload) are copied here so the device
 * has a dword-aligned copy to write back to — presumably the flow
 * handler writes back to the first TB only; confirm against the FH
 * documentation.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE 16
242
/**
 * struct iwl_pcie_txq_entry - driver state for one TX queue slot
 * @cmd: the device command placed in this slot (command queue)
 * @skb: the frame placed in this slot (data queues)
 * @free_buf: extra buffer to free once the command completes (may be NULL)
 * @meta: per-slot command metadata
 */
struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};
250
/**
 * struct iwl_pcie_txq_scratch_buf - per-slot scratch area
 * @hdr: copy of the command header
 * @buf: first bytes of the command payload
 * @scratch: scratch dword the device can write back to
 *
 * sizeof() of this struct should match IWL_HCMD_SCRATCHBUF_SIZE plus the
 * trailing scratch dword — confirm against the TX setup code.
 */
struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
/**
 * struct iwl_txq - one TX queue
 * @q: generic ring bookkeeping (read/write pointers, marks)
 * @tfds: the transmit frame descriptor ring shared with the device
 * @scratchbufs: per-slot scratch buffers (see IWL_HCMD_SCRATCHBUF_SIZE)
 * @scratchbufs_dma: bus address of @scratchbufs
 * @entries: per-slot driver state (skb / command / metadata)
 * @lock: protects this queue
 * @frozen_expiry_remainder: remaining watchdog time captured when the
 *	queue's timer was frozen
 * @stuck_timer: watchdog that fires when the queue makes no progress
 * @trans_pcie: back-pointer to the owning transport
 * @need_update: the device write pointer needs updating
 * @frozen: the queue's watchdog is frozen
 * @active: the queue is in use
 * @ampdu: queue carries an aggregation session (presumably — confirm)
 * @block: queue is blocked from accepting new frames
 * @wd_timeout: watchdog timeout, in jiffies
 * @overflow_q: frames queued in software while the ring was full
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	bool block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
};
297
298static inline dma_addr_t
299iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
300{
301 return txq->scratchbufs_dma +
302 sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
303}
304
/**
 * struct iwl_tso_hdr_page - per-CPU page used to build TSO headers
 * @page: the backing page
 * @pos: current fill position within @page
 */
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 *
 * Field notes below are based on local use and naming; confirm
 * hardware-facing details against the implementation files.
 *
 * @rxq: RX queue(s); pointer suggests an array on multi-queue devices
 * @rx_pool: pool of RX buffers shared by the queues
 * @global_table: maps an RB's vid to its iwl_rx_mem_buffer
 * @rba: background RB page allocator
 * @trans: back-pointer to the generic transport
 * @drv: driver core handle (opaque here)
 * @napi_dev: dummy netdev used to host NAPI contexts
 * @tso_hdr_page: per-CPU page for building TSO headers
 *	NOTE(review): __percpu conventionally annotates the pointer, i.e.
 *	"struct iwl_tso_hdr_page __percpu *tso_hdr_page" — confirm sparse
 *	accepts this placement
 * @ict_tbl: interrupt cause table (ICT), CPU address
 * @ict_tbl_dma: ICT bus address
 * @ict_index: current read position in the ICT
 * @use_ict: ICT-based interrupt handling is active
 * @is_down: the transport has been stopped
 * @isr_stats: interrupt statistics
 * @irq_lock: protects interrupt/ICT handling state
 * @mutex: serializes start/stop flows
 * @inta_mask: interrupt causes currently enabled (non-MSI-X path)
 * @scd_base_addr: scheduler SRAM base address
 * @scd_bc_tbls: scheduler byte-count tables (DMA allocation)
 * @kw: keep-warm buffer (DMA allocation) — presumably; confirm
 * @txq: array of TX queues
 * @queue_used: bitmap of hardware queues in use
 * @queue_stopped: bitmap of hardware queues currently stopped
 * @pci_dev: underlying PCI device
 * @hw_base: mapped device register space
 * @ucode_write_complete: set when a firmware chunk's DMA finished
 * @ucode_write_waitq: waitq for @ucode_write_complete
 * @wait_command_queue: waitq for synchronous host commands
 * @d0i3_waitq: waitq for d0i3 enter/exit transitions
 * @cmd_queue: index of the host command queue
 * @cmd_fifo: FIFO used by the command queue
 * @cmd_q_wdg_timeout: watchdog timeout for the command queue
 * @n_no_reclaim_cmds: number of valid entries in @no_reclaim_cmds
 * @no_reclaim_cmds: command ids whose responses must not trigger reclaim
 * @rx_buf_size: RX buffer size (A-MSDU size enum)
 * @bc_table_dword: byte-count table expects counts in dwords
 * @scd_set_active: set the scheduler "active" bit when enabling queues
 * @wide_cmd_header: firmware supports wide host-command headers
 * @sw_csum_tx: compute TX checksums in software
 * @rx_page_order: page allocation order for RX buffers
 * @reg_lock: protects register accesses that must hold the NIC awake
 * @cmd_hold_nic_awake: a command is currently holding the NIC awake
 * @ref_cmd_in_flight: an in-flight command holds a runtime-PM reference
 * @fw_mon_phys: firmware monitor buffer, bus address
 * @fw_mon_page: firmware monitor buffer page
 * @fw_mon_size: firmware monitor buffer size
 * @msix_entries: MSI-X vector table
 * @msix_enabled: MSI-X is in use
 * @allocated_vector: number of allocated MSI-X vectors
 * @default_irq_num: vector used for non-RX causes
 * @fh_init_mask: initial (all-masked) FH cause mask
 * @hw_init_mask: initial (all-masked) HW cause mask
 * @fh_mask: currently unmasked FH causes
 * @hw_mask: currently unmasked HW causes
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT / ICT state */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	bool sw_csum_tx;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u32 allocated_vector;
	u32 default_irq_num;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
};
419
420static inline struct iwl_trans_pcie *
421IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
422{
423 return (void *)trans->trans_specific;
424}
425
/*
 * Inverse of IWL_TRANS_GET_PCIE_TRANS: recover the enclosing iwl_trans
 * from its embedded PCIe-specific data via container_of (the cast to
 * void * matches the trans_specific member's type).
 */
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
432
433
434
435
436
/*****************************************************
* Transport allocation / teardown
******************************************************/
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
452
453
454
455
/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);
461
462
463
464
/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
483
484static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
485{
486 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
487
488 return le16_to_cpu(tb->hi_n_len) >> 4;
489}
490
491
492
493
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);
495
496
497
498
/*
 * Mask all interrupt causes and acknowledge anything still pending.
 * STATUS_INT_ENABLED is cleared first so the ISR sees interrupts as
 * disabled before the registers change.
 */
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/*
		 * acknowledge/clear/reset any interrupts still pending
		 * from uCode or the flow handler (RX/TX DMA)
		 */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* mask every MSI-X cause we might use (init masks = all set) */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
521
/* Re-enable the full set of interrupt causes. */
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh_mask/hw_mask cache the currently UNMASKED causes.
		 * In MSI-X a cause is enabled when its mask bit is clear,
		 * hence the bitwise NOT when writing the registers.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}
544
/*
 * Enable exactly the MSI-X HW causes in @msk (a cause is enabled when its
 * mask bit is clear) and cache the new unmasked set in hw_mask.
 */
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}
552
/*
 * Enable exactly the MSI-X FH causes in @msk (a cause is enabled when its
 * mask bit is clear) and cache the new unmasked set in fh_mask.
 */
static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
560
/*
 * Enable only the interrupt that signals firmware-chunk DMA completion
 * (FH TX / D2S channel 0); all other causes stay masked.
 */
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/* mask all HW causes, then unmask just the D2S CH0 FH cause */
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}
576
/*
 * Enable only the RF-kill interrupt cause; all other causes stay masked.
 * Used while the radio is killed so state changes are still noticed.
 */
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/* mask all FH causes, then unmask just the RF-kill HW cause */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}
}
592
/*
 * Mark a hardware queue as no longer stopped and notify the op-mode.
 * The atomic test-and-clear ensures the not-full notification is sent
 * only once per stop/wake cycle.
 */
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}
603
/*
 * Mark a hardware queue as stopped and notify the op-mode. The atomic
 * test-and-set ensures the queue-full notification is sent only once;
 * repeated calls just log.
 */
static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}
616
617static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
618{
619 return q->write_ptr >= q->read_ptr ?
620 (i >= q->read_ptr && i < q->write_ptr) :
621 !(i < q->read_ptr && i >= q->write_ptr);
622}
623
624static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
625{
626 return index & (q->n_window - 1);
627}
628
/*
 * Return true when RF-kill is asserted. The CSR_GP_CNTRL bit is inverted
 * here — presumably the bit reads as set when the switch allows the
 * radio; confirm against the CSR register documentation.
 */
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
634
/*
 * Read-modify-write helper: clear @mask in @reg, then OR in @value.
 * @value must be a subset of @mask (debug builds warn otherwise).
 * Not atomic by itself — the caller provides any needed locking.
 */
static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}
649
/* Clear @mask bits in @reg (read-modify-write; caller handles locking). */
static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

/* Set @mask bits in @reg (read-modify-write; caller handles locking). */
static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
661
/* Propagate an RF-kill state change (@state true == killed) */
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
/* debugfs disabled: registering is a successful no-op */
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

/* d0i3 (device power-save state) enter/exit transitions */
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
675
676#endif
677