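/*
 * iwlwifi PCIe transport - RX path: receive buffer management and
 * interrupt handling (legacy INTA and ICT modes).
 */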
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
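
/*
 * RX path overview:
 *
 * The host keeps a circular buffer of receive buffer descriptors (RBDs),
 * each holding the DMA address of a page-sized receive buffer.  The driver
 * maintains two indexes into that circular buffer:
 *
 *   rxq->read  - index of the next RBD for the driver to process
 *   rxq->write - index of the next slot that can be handed back to the device
 *
 * Buffers move between two lists: rx_free holds DMA-mapped, ready-to-use
 * buffers that iwl_pcie_rxq_restock() places into the circular buffer, while
 * rx_used holds buffers that still need a fresh page from
 * iwl_pcie_rxq_alloc_rbs().  When the pool of free buffers drops below
 * RX_LOW_WATERMARK, the rx_replenish work is scheduled to refill it from
 * process context; under pressure the RX path also replenishes directly
 * with GFP_ATOMIC allocations.
 */

/*
 * iwl_rxq_space - return the number of free slots in the RX queue
 */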
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	int s = rxq->read - rxq->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep a couple of slots empty so a full queue stays distinguishable
	 * from an empty one */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
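
/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address into the packed RBD
 * pointer format the device expects (address shifted right by 8, i.e.
 * the buffer must be 256-byte aligned).
 */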
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
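
/*
 * iwl_pcie_rx_stop - stop RX DMA and wait for the channel to go idle
 */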
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
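
/*
 * iwl_pcie_rxq_inc_wr_ptr - tell the device about the updated RX write pointer
 */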
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&rxq->lock, flags);

	if (rxq->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled, no need to wake up the NIC */
		rxq->write_actual = (rxq->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					       "Rx queue requesting wakeup,"
					       " GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			rxq->write_actual = (rxq->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   rxq->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			rxq->write_actual = (rxq->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   rxq->write_actual);
		}
	}
	rxq->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&rxq->lock, flags);
}
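
/*
 * iwl_pcie_rxq_restock - refill the RX queue from the rx_free pool
 *
 * Moves mapped buffers from rxq->rx_free into the circular RBD buffer and
 * advances rxq->write.  If the free pool is running low, the replenish work
 * is scheduled to allocate more pages.  The device is only told about the
 * new write pointer in multiples of 8 buffers.
 */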
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - no need to try to add buffers.
	 * This can happen when the device is stopped but an interrupt is
	 * still pending; with the device stopped the HW can't be accessed,
	 * so don't restock in that case.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * The device's write pointer is only advanced in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}
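
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each buffer on rx_used
 *
 * Called either from the replenish work (GFP_KERNEL) or directly from the
 * RX path under memory pressure (GFP_ATOMIC).  Each allocated page is
 * DMA-mapped and the buffer is moved to the rx_free list; rx_free buffers
 * are handed to the device by iwl_pcie_rxq_restock().
 */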
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					       "order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_irqsave(&rxq->lock, flags);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}
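
/*
 * iwl_pcie_rx_replenish - allocate pages for all used RBDs and restock the
 * RX queue.  Runs from process context (the rx_replenish work), so pages
 * can be allocated with GFP_KERNEL.
 */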
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
		container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/*
	 * Enable Rx DMA:
	 *  - ignore "RX FIFO empty" conditions
	 *  - direct RX interrupts to the host
	 *  - RB size (4k or 8k), RB timeout, and log2 of the RBD count
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to the default */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);

	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			     rxcb._offset,
			     get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 * to a (driver-originated) command. If the packet (e.g. an
		 * Rx frame) originated from uCode, there is no command
		 * buffer to reclaim. Commands on the no_reclaim list are
		 * skipped as well. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
				    pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */
		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd() as we reclaim the driver
			 * command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
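
/*
 * iwl_pcie_rx_handle - main RX entry point: walks the RBDs that the
 * firmware has filled (up to closed_rb_num) and passes each to
 * iwl_pcie_rx_handle_rb(), restocking the queue as it goes.
 */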
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode) */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate how many buffers need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames, restock the Rx queue
		 * every 8 handled buffers so the firmware doesn't starve. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* record the last processed index and restock */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}
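
/*
 * iwl_pcie_irq_handle_error - called on HW or SW (firmware) error interrupt
 */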
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex: if WiMAX owns the RF, hand off instead of
	 * treating this as a firmware error */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			     APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_pcie_dump_fh(trans, NULL);

	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/*
	 * Ack/clear/reset pending uCode interrupts.
	 * Note that disabled interrupt bits are acked here as well, which
	 * works around interrupts that may be raised even while masked.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}

		/* The RX interrupt can race with the device still updating
		 * the ICT table and the shared RX status; the periodic
		 * interrupt is used as a one-shot to catch any such dangling
		 * RX activity. Disable it here and only rearm it below if a
		 * real RX interrupt was seen. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/* Rearm the periodic interrupt only if we received a real RX
		 * interrupt (not just the periodic one), to catch any RX
		 * activity that is still in flight. */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts, but only if they were disabled by this
	 * handler */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
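
/*
 * ICT interrupt table support.
 *
 * In ICT mode the device writes interrupt causes into a DMA table in host
 * memory, so the ISR can read causes from memory instead of going through
 * the CSR_INT register on every interrupt.
 */

/* a device (PCI-E) page is 4096 bytes long */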
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
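
/* Free the DMA-coherent ICT table */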
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
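
/*
 * Allocate the DMA-coherent ICT table (aligned to ICT_SIZE) and reset the
 * driver's ICT bookkeeping.
 */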
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
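
/*
 * iwl_pcie_reset_ict - (re)enable ICT mode: clear the table, tell the device
 * where it lives, and re-enable interrupts.
 */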
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
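
/* Device is going down - disable ICT interrupt usage */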
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
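
/* legacy (non-ICT) ISR; assumes trans_pcie->irq_lock is held by the caller */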
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sloppy interrupt handling.
	 * If we have something to service, the irq thread will re-enable them.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask,
			      iwl_read32(trans, CSR_FH_INT_STATUS));

	trans_pcie->inta |= inta;

	/* the irq thread will service interrupts and re-enable them */
	if (likely(inta))
		return IRQ_WAKE_THREAD;
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service;
	 * only re-enable if they were disabled by this irq and nothing is
	 * pending */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}
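
/*
 * iwl_pcie_isr_ict - the interrupt handler in ICT mode: interrupt causes are
 * read from the DMA'd ICT table rather than from the CSR_INT register.
 * Entries are consumed (zeroed) until an empty slot is found, and the
 * accumulated causes are handed to iwl_pcie_irq_handler().
 */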
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet, use legacy interrupt */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid back-to-back
	 * ISRs and sloppy interrupt handling.  They are re-enabled by the
	 * irq thread, or below if there is nothing to service. */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/* Collect all non-zero entries starting at ict_index (the entry at
	 * ict_index was already read above), zeroing each one as we go. */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/* The Rx cause bit (bit 15 before shifting it to 31) may be missed
	 * when interrupt coalescing is used; bits 18 and 19 stay set in that
	 * case, so use them to infer the real state of the Rx bit. */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
		      inta, trans_pcie->inta_mask, val);
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
			      iwl_read32(trans, CSR_INT_MASK));

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_pcie_irq_handler() will service interrupts and re-enable them */
	if (likely(inta)) {
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return IRQ_WAKE_THREAD;
	} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		   !trans_pcie->inta) {
		/* Interrupts were disabled by this handler but nothing needs
		 * servicing, so re-enable them here. */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service;
	 * only re-enable if they were disabled by this irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}