#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/*
 * RX path overview:
 *
 * The driver keeps a pool of receive buffers (struct iwl_rx_mem_buffer),
 * each backed by one page mapped for DMA. Buffers move between two lists:
 * rx_used (no page attached) and rx_free (page allocated and DMA mapped).
 * iwl_pcie_rxq_restock() pulls buffers from rx_free and writes their DMA
 * addresses into the circular buffer descriptor (bd) ring that the device
 * reads; the device fills the buffers and reports its progress through
 * closed_rb_num in the rb_stts status area. iwl_pcie_rx_handle() walks the
 * ring from rxq->read up to that point, hands each filled buffer to the op
 * mode, and then recycles or replenishes the pages.
 */
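
/*
 * iwl_rxq_space - Return number of free slots available in the queue.
 */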
static int iwl_rxq_space(const struct iwl_rxq *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
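
/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address into the form the
 * device expects in a receive buffer descriptor: the address shifted
 * right by 8, i.e. bits [35:8] of a 256-byte-aligned buffer.
 */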
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
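
/*
 * iwl_pcie_rx_stop - stops the Rx DMA and waits for the channel to go idle
 */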
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
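
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */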
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					       "Rx queue requesting wakeup, GP1 = 0x%x\n",
					       reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);
		} else {
			/* Device is assumed to be awake, write directly */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
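
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'read', and
 * also updates the device's write pointer so it knows about the new
 * buffers.
 */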
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - no need to try to add buffers.
	 * A (posted) interrupt can still arrive after the device has been
	 * stopped; restocking then would touch hardware and buffers that
	 * are already being torn down.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}
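
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it
 * again a page must be allocated and the RBD must point to the page. This
 * function doesn't change the HW pointer but handles the list of pages that
 * is used by iwl_pcie_rxq_restock. The latter function will update the HW to
 * use the newly allocated buffers.
 */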
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans,
					       "alloc_pages failed, order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans,
					 "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here: when the
			 * pool runs low, the restock path will schedule it */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_irqsave(&rxq->lock, flags);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* Some buffers may still be mapped and attached to pages,
		 * so unmap and free any potential storage first */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
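
/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * A page is allocated for each moved buffer, and the queue is then
 * restocked via iwl_pcie_rxq_restock. This runs as a workqueue item
 * (except during initialization).
 */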
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA:
	 * ignore an empty RX FIFO (FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY),
	 * direct Rx interrupts to the host,
	 * Rx buffer size 4k or 8k,
	 * RB timeout RX_RB_TIMEOUT,
	 * 256 RBDs.
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to the default */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	INIT_WORK(&trans_pcie->rx_replenish,
		  iwl_pcie_rx_replenish_work);

	iwl_pcie_rxq_free_rbs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			     rxcb._offset,
			     get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free pool for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
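
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */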
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode) */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate how many buffers need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}
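
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */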
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
	       APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
	      APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_pcie_dump_fh(trans, NULL);

	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 *
	 * Some interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even
	 * when they are disabled in the CSR_INT_MASK register, and the ICT
	 * mechanism may fail to detect such unmasked interrupts. Work around
	 * this by ACKing all possible interrupts so that interrupt
	 * coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, inta_mask);
	}
#endif

	/* interrupt is saved in inta now; reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished transmitting the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}

		/* Sending an RX interrupt requires many steps in the device:
		 * 1- write interrupt to current index in ICT table,
		 * 2- dma RX frame,
		 * 3- update RX shared data to indicate last write index,
		 * 4- send interrupt.
		 * This can lead to an RX race: the driver receives the RX
		 * interrupt but the shared data does not reflect it yet;
		 * the periodic interrupt catches any such dangling Rx
		 * activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Re-enable the periodic interrupt only if we received a real
		 * RX interrupt (not just the periodic one), to catch any
		 * dangling Rx activity. If it was just the periodic
		 * interrupt, there was no dangling Rx activity and the
		 * one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;

		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
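
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/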

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* Free the DRAM interrupt table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
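
/*
 * Allocate the DRAM-shared interrupt table: an aligned block of
 * ICT_SIZE bytes. Also reset all data related to ICT table interrupts.
 */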
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
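
/* Device is going up: inform it that we are using the ICT interrupt table */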
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
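
/* Device is going down: disable use of the ICT table */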
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
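
/* Legacy ISR: reads the interrupt causes directly from CSR_INT.
 * The caller (iwl_pcie_isr_ict) holds trans_pcie->irq_lock. */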
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will service it.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* the irq thread will service interrupts and re-enable them */
	if (likely(inta))
		return IRQ_WAKE_THREAD;
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service */
	/* only re-enable if disabled by irq and nothing is queued for the
	 * irq thread */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}
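
/* Interrupt handler using the ICT table. With this handler the driver stops
 * reading the (expensive) CSR_INT register to learn the interrupt causes;
 * instead the device writes them into the ICT table in DRAM, increments its
 * index and fires the interrupt. The driver ORs all ICT entries from the
 * current index up to the first zero entry, services the result, writes the
 * entries back to 0 and updates the index. */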
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will service it.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	/* expand the 16-bit ICT value back into the CSR_INT bit layout */
	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
		      inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* the irq thread will service interrupts and re-enable them */
	if (likely(inta)) {
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return IRQ_WAKE_THREAD;
	} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		   !trans_pcie->inta) {
		/* Allow interrupt if it was disabled by this handler and
		 * nothing was queued for the irq thread; otherwise the
		 * thread will re-enable it. */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}