#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"
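
/*
 * RX path overview (summary of the code below)
 *
 * Each RX queue owns a circular buffer of receive buffer descriptors
 * (RBDs) that the driver keeps filled with DMA-mapped pages.  Buffers
 * move between three places:
 *
 *   rxq->rx_free   - mapped pages, ready to be handed to the hardware
 *   rxq->rx_used   - buffers whose pages were consumed (or never mapped)
 *                    and that need a new page
 *   rba (allocator) - a background worker that turns rx_used entries into
 *                    freshly mapped pages in batches of RX_CLAIM_REQ_ALLOC,
 *                    so the hot path rarely allocates memory itself
 *
 * The restock functions publish rx_free buffers to the hardware and update
 * the write pointer; iwl_pcie_rx_handle() drains completed buffers, hands
 * them to the op_mode and recycles the pages.
 */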

static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (size - 1) free slots, to avoid ambiguity
	 * between an empty and a completely full queue.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode RBD pointer
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
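
/*
 * iwl_pcie_rx_stop - stop the RX DMA and wait for it to go idle
 */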
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}
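
/*
 * iwl_pcie_rxq_inc_wr_ptr - update the device's write pointer for the RX queue
 */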
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * If the device isn't using shadow registers and may be asleep
	 * (PMI set), make sure the NIC is awake before writing the write
	 * pointer; otherwise just request a wakeup and defer the update.
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->cfg->csr->flag_mac_access_req));
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    (rxq->write_actual |
			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
	else if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}
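
/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue RX
 *
 * Move free buffers from rx_free to the hardware's free table and, once the
 * write pointer has advanced by at least 8, tell the hardware about it.
 */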
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers.
	 * This can happen when we stop the device and still have an
	 * interrupt pending that ends up restocking after the reset.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* the lowest 12 bits of the DMA address must be zero */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * The device's write pointer is updated in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
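
/*
 * iwl_pcie_rxsq_restock - restock implementation for single-queue RX
 */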
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/* If the device isn't enabled - no need to try to add buffers */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * The device's write pointer is updated in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
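
/*
 * iwl_pcie_rxq_restock - refill the RX queue from the pre-allocated pool
 *
 * Dispatches to the multi-queue or single-queue implementation. This moves
 * buffers that already have pages attached from rx_free to the hardware's
 * descriptors, so the device can DMA received frames into them.
 */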
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocate a page for an RX buffer
 *
 * Allocates (possibly high-order) pages according to the configured RB size.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Scream loudly unless the caller asked not to warn
		 * (e.g. the allocator's background refill path).
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}
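
/*
 * iwl_pcie_rxq_alloc_rbs - allocate pages for all used RX buffers
 *
 * A used RBD is an Rx buffer that has been given to the stack; to use it
 * again a page must be allocated and attached to it. This can be called
 * from process context (GFP_KERNEL) or from the RX path in an emergency
 * (GFP_ATOMIC), so the allocation priority is supplied by the caller.
 */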
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}
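
/*
 * iwl_pcie_rx_allocator - background page allocator
 *
 * Allocates pages in batches of RX_CLAIM_REQ_ALLOC on behalf of the RX
 * queues. Empty RBDs are taken from rba->rbd_empty, get a freshly mapped
 * page and are placed on rba->rbd_allocated, from where
 * iwl_pcie_rx_allocator_get() claims them in the RX path.
 */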
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_read(&rba->req_pending);

	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

	/* swap out the rba->rbd_empty to a local list */
	spin_lock(&rba->lock);
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/*
			 * List should never be empty - each pending request
			 * is matched by empty RBDs moved here by the RX path.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);

	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}
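
/*
 * iwl_pcie_rx_allocator_get - claim a completed allocation batch
 *
 * Called from the RX path (under rxq->lock) to pull RX_CLAIM_REQ_ALLOC
 * freshly allocated buffers from the allocator into rxq->rx_free.
 */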
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * A batch is available only if the allocator has completed at
	 * least one full request (req_ready > 0); otherwise bail out.
	 * atomic_dec_if_positive() both tests and consumes the token.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next allocated buffer, remove it from the allocator */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
			sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);
	size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
			      sizeof(struct iwl_rb_status);

	spin_lock_init(&rxq->lock);
	if (trans->cfg->mq_rx_supported)
		rxq->queue_size = MQ_RX_TABLE_SIZE;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->cfg->mq_rx_supported) {
		rxq->used_bd = dma_alloc_coherent(dev,
						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
						  &rxq->used_bd_dma,
						  GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
	rxq->rb_stts_dma =
		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->tr_tail_dma, GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->cr_tail_dma, GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;

	/* write a non-zero initial value into the CR tail */
	*rxq->cr_tail = cpu_to_le16(500);

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}

	return -ENOMEM;
}

int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;
	size_t rb_stts_size = trans->cfg->device_family >=
			      IWL_DEVICE_FAMILY_22560 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -ENOMEM;

	spin_lock_init(&rba->lock);

	/*
	 * Allocate the driver's pointer to receive buffer status.
	 * Allocate for all queues continuously (one block).
	 */
	trans_pcie->base_rb_stts =
			dma_alloc_coherent(trans->dev,
					   rb_stts_size * trans->num_rx_queues,
					   &trans_pcie->base_rb_stts_dma,
					   GFP_KERNEL);
	if (!trans_pcie->base_rb_stts) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			goto err;
	}
	return 0;

err:
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}
	kfree(trans_pcie->rxq);

	return ret;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/*
	 * Enable Rx DMA:
	 *  - ignore RXF empty
	 *  - interrupt the host when a frame arrives
	 *  - use the configured RB size, RB timeout and number of RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* enable the host interrupt operation mode if the HW needs it */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device indice tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA:
	 *  - drop frames that are larger than the RB
	 *  - use the configured RB size
	 *  - 512 RBDs per queue, min RB size of 4/8
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping, set the RB chunk size (64B for integrated
	 * devices, 128B otherwise) and make queue 0 the default queue.
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}

int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock(&rxq->lock);
		/*
		 * Set read/write pointers to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers yet.
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0,
		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
		       sizeof(__le16) : sizeof(struct iwl_rb_status));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/* Set interrupt coalescing timer to default */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH here; restock is done when the ALIVE
	 * interrupt arrives, after the firmware has configured the RFH
	 * (see the interrupt handlers below).
	 */
	return _iwl_pcie_rx_init(trans);
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;
	size_t rb_stts_size = trans->cfg->device_family >=
			      IWL_DEVICE_FAMILY_22560 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}

static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
					  struct iwl_rb_allocator *rba)
{
	spin_lock(&rba->lock);
	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
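
/*
 * iwl_pcie_rx_reuse_rbd - return a pageless RBD to the used list
 *
 * The buffer waits there for a new page. Every time RX_POST_REQ_ALLOC such
 * buffers accumulate (and we are not in emergency mode), they are handed to
 * the allocator and an allocation request is posted.
 */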
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list; it will get a new page later */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the RBDs that are waiting for new pages */
	rxq->used_count++;

	/*
	 * Every time RX_POST_REQ_ALLOC used RBDs accumulate (modulo
	 * RX_CLAIM_REQ_ALLOC), hand them to the allocator and post a
	 * new allocation request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		iwl_pcie_rx_move_to_allocator(rxq, rba);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
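
/*
 * iwl_pcie_rx_handle_rb - handle a single received buffer
 *
 * Walks all packets in the RB, hands them to the op_mode and reclaims
 * completed host commands. Afterwards the page is either re-mapped and put
 * back on rx_free, or (if the op_mode kept the page, or re-mapping failed)
 * recycled through iwl_pcie_rx_reuse_rbd().
 */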
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rxq *rxq,
				  struct iwl_rx_mem_buffer *rxb,
				  bool emergency,
				  int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
			IWL_DEBUG_RX(trans,
				     "Q %d: RB end marker at offset %d\n",
				     rxq->id, offset);
			break;
		}

		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS != rxq->id,
		     "frame on invalid queue - is on %d and indicates %d\n",
		     rxq->id,
		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS);

		IWL_DEBUG_RX(trans,
			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
			     rxq->id, offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.group_id, pkt->hdr.cmd,
			     le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/*
		 * Reclaim a command buffer only if this packet is a response
		 * to a (driver-originated) command. Packets that originated
		 * from the uCode (SEQ_RX_FRAME set) and commands on the
		 * no_reclaim list have no command buffer to reclaim.
		 */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim && !pkt->hdr.group_id) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = iwl_pcie_get_cmd_index(txq, index);

		if (rxq->id == trans_pcie->def_rx_queue)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen;
		 * if it is true then one of the handlers took the page.
		 */
		if (reclaim) {
			/*
			 * Complete the host command; the buffer is consumed
			 * by iwl_pcie_hcmd_complete(), don't use it after
			 * this point.
			 */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			break;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our pages & allocate a new one */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/*
	 * Reuse the page if possible: re-map it for the device and put the
	 * buffer back on the free list; otherwise recycle the empty RBD.
	 */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page as well, to keep the invariant that
			 * items on the used list have no page attached
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
						  struct iwl_rxq *rxq, int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	u16 vid;

	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);

	if (!trans->cfg->mq_rx_supported) {
		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;
		return rxb;
	}

	/* only the low 12 bits of the used BD hold the virtual RB ID */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
	else
		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;

	if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
		goto out_err;

	rxb = trans_pcie->global_table[vid - 1];
	if (rxb->invalid)
		goto out_err;

	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);

	rxb->invalid = true;

	return rxb;

out_err:
	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
	iwl_force_nmi(trans);
	return NULL;
}
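
/*
 * iwl_pcie_rx_handle - process newly received buffers on an RX queue
 *
 * Walks from the driver's read pointer up to the last buffer the hardware
 * closed, handles each buffer and keeps the queue stocked. If too many
 * allocation requests pile up, it enters an "emergency" mode in which it
 * allocates pages directly (GFP_ATOMIC) instead of waiting for the
 * background allocator.
 */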
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq;
	u32 r, i, count = 0;
	bool emergency = false;

	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
		return;

	rxq = &trans_pcie->rxq[queue];

restart:
	spin_lock(&rxq->lock);
	/*
	 * uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode)
	 */
	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
	i = rxq->read;

	/* mask out values beyond the queue size (may wrap around) */
	r &= (rxq->queue_size - 1);

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

	while (i != r) {
		struct iwl_rb_allocator *rba = &trans_pcie->rba;
		struct iwl_rx_mem_buffer *rxb;
		/* number of RBDs still waiting for page allocation */
		u32 rb_pending_alloc =
			atomic_read(&trans_pcie->rba.req_pending) *
			RX_CLAIM_REQ_ALLOC;

		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
			     !emergency)) {
			iwl_pcie_rx_move_to_allocator(rxq, rba);
			emergency = true;
			IWL_DEBUG_TPT(trans,
				      "RX path is in emergency. Pending allocations %d\n",
				      rb_pending_alloc);
		}

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);

		rxb = iwl_pcie_get_rxb(trans, rxq, i);
		if (!rxb)
			goto out;

		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);

		i = (i + 1) & (rxq->queue_size - 1);

		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers, try to
		 * claim the pre-allocated buffers from the allocator. If it
		 * is not ready yet, we will try again later.
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
			iwl_pcie_rx_allocator_get(trans, rxq);

		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
			/* Add the remaining empty RBDs for allocator use */
			iwl_pcie_rx_move_to_allocator(rxq, rba);
		} else if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rb_pending_alloc < rxq->queue_size / 3) {
					IWL_DEBUG_TPT(trans,
						      "RX path exited emergency. Pending allocations %d\n",
						      rb_pending_alloc);
					emergency = false;
				}

				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				iwl_pcie_rxq_restock(trans, rxq);
				goto restart;
			}
		}
	}
out:
	rxq->read = i;
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		*rxq->cr_tail = cpu_to_le16(r);
	spin_unlock(&rxq->lock);

	/*
	 * Handle the case where in emergency there are some unallocated RBDs.
	 * Those RBDs are in the used list but are not tracked by the queue's
	 * used_count, which only counts allocator-owned RBDs. They must be
	 * allocated on exit, otherwise on the next call (possibly no longer
	 * in emergency mode) they would be handed to the allocator without
	 * being tracked and would never be claimed back by the queue. By
	 * allocating them here, they end up on the queue's free list and are
	 * ordered correctly between the queue and the allocator.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	if (rxq->napi.poll)
		napi_gro_flush(&rxq->napi, false);

	iwl_pcie_rxq_restock(trans, rxq);
}

static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
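
/*
 * iwl_pcie_irq_rx_msix_handler - threaded handler for an RX MSI-X vector
 *
 * The vector's entry number is used directly as the RX queue number.
 */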
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);

	if (WARN_ON(entry->entry >= trans->num_rx_queues))
		return IRQ_NONE;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	local_bh_disable();
	iwl_pcie_rx_handle(trans, entry->entry);
	local_bh_enable();

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		if (!trans_pcie->txq[i])
			continue;
		del_timer(&trans_pcie->txq[i]->stuck_timer);
	}

	/*
	 * Report the error before waking up the command caller,
	 * to ensure a proper cleanup.
	 */
	iwl_trans_fw_error(trans);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
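
/*
 * iwl_pcie_int_cause_ict - read interrupt causes from the ICT table
 *
 * The ICT is a DRAM table the device updates instead of the (slower to
 * read) CSR_INT register. Collect all set entries starting at the current
 * index and fold them into a CSR_INT-style bitmask.
 */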
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/*
	 * Ignore the interrupt if there's nothing in the NIC to service.
	 * This may be due to an IRQ shared with another device, or due
	 * to sporadic interrupts thrown from our NIC.
	 */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * The Rx bit (bit 15 before shifting it to 31) may be cleared when
	 * using interrupt coalescing, but bits 18 and 19 stay set in that
	 * case; use them to recover the real state of the Rx bit.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	bool hw_rfkill, prev, report;

	mutex_lock(&trans_pcie->mutex);
	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
		set_bit(STATUS_RFKILL_HW, &trans->status);
	}
	if (trans_pcie->opmode_down)
		report = hw_rfkill;
	else
		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
		 hw_rfkill ? "disable radio" : "enable radio");

	isr_stats->rfkill++;

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);
	mutex_unlock(&trans_pcie->mutex);

	if (hw_rfkill) {
		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
				       &trans->status))
			IWL_DEBUG_RF_KILL(trans,
					  "Rfkill while SYNC HCMD in flight\n");
		wake_up(&trans_pcie->wait_command_queue);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/*
	 * Use the ICT (DRAM) table if it has been set up, otherwise read
	 * the interrupt causes directly from CSR_INT.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore the interrupt if there's nothing in the NIC to service.
	 * This may be due to an IRQ shared with another device, or due
	 * to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/*
	 * Ack/clear/reset pending uCode interrupts.
	 * Ack all possible bits (inta | ~inta_mask) to also clear interrupts
	 * that can be raised even while they are masked.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	/* NIC fires this, but we don't use it, redundant with WAKEUP */
	if (inta & CSR_INT_BIT_SCD) {
		IWL_DEBUG_ISR(trans,
			      "Scheduler finished to transmit the frame/frames.\n");
		isr_stats->sch++;
	}

	/* Alive notification */
	if (inta & CSR_INT_BIT_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->cfg->gen2) {
			/*
			 * We can restock now, since the firmware has
			 * configured the RFH
			 */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}

		handled |= CSR_INT_BIT_ALIVE;
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		iwl_pcie_handle_rfkill_irq(trans);
		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/*
	 * All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here.
	 */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}

		/*
		 * The RX interrupt can race with the device's shared status
		 * update; the periodic interrupt is used as a one-shot
		 * backstop to catch any such dangling RX activity.
		 */
		/* Disable periodic interrupt; we use it as just a one-shot */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		/*
		 * Re-enable the periodic interrupt only if we received a
		 * real RX interrupt (instead of just the periodic one); if
		 * it was only the periodic interrupt, there was no dangling
		 * RX activity and the one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	spin_lock(&trans_pcie->irq_lock);
	/* only re-enable all interrupts if not disabled by the IRQ path */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		_iwl_enable_interrupts(trans);
	/* we are loading the firmware, enable FH_TX interrupt only */
	else if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
	/* Re-enable the ALIVE / Rx interrupt if it occurred */
	else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
		iwl_enable_fw_load_int_ctx_info(trans);
	spin_unlock(&trans_pcie->irq_lock);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
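
/*
 * iwl_pcie_free_ict - free the DRAM interrupt cause table
 */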
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * iwl_pcie_alloc_ict - allocate the DRAM shared interrupt cause table,
 * an aligned memory block of ICT_SIZE bytes.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just a sanity check - the table must be aligned to its size */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}
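
/*
 * iwl_pcie_reset_ict - (re)program the device to use the ICT table
 *
 * Clears the table, points the device at its DMA address and re-enables
 * interrupts.
 */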
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* Device is going down - disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/*
	 * Disable (but don't clear!) interrupts here to avoid back-to-back
	 * ISRs and sporadic interrupts from our NIC. If we have something
	 * to service, the threaded handler will re-enable interrupts; if
	 * not, it re-enables them before returning.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers so the same causes are not handled again.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_fh, trans_pcie->fh_mask,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
		if (inta_fh & ~trans_pcie->fh_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta_fh & ~trans_pcie->fh_mask);
	}

	inta_fh &= trans_pcie->fh_mask;

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 1);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking FH register check HW register */
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_hw, trans_pcie->hw_mask,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
		if (inta_hw & ~trans_pcie->hw_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt 0x%08x\n",
				      inta_hw & ~trans_pcie->hw_mask);
	}

	inta_hw &= trans_pcie->hw_mask;

	/* Alive notification */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
	    inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
		/* Reflect IML transfer status */
		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
		if (res == IWL_IMAGE_RESP_FAIL) {
			isr_stats->sw++;
			iwl_pcie_irq_handle_error(trans);
		}
	} else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		/* uCode wakes up after power-down sleep */
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
		/* Reflect IML transfer status */
		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
		if (res == IWL_IMAGE_RESP_FAIL) {
			isr_stats->sw++;
			iwl_pcie_irq_handle_error(trans);
		}
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		trans->dbg.hw_error = true;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}