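/*
 * RX buffer management and interrupt handling for the iwlwifi PCIe
 * transport layer.
 */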

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
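
/*
 * RX path overview:
 *
 * The driver keeps a circular buffer of Receive Buffer Descriptors (RBDs),
 * each pointing to a DMA-mapped receive page.  Buffers that still need a
 * page and a DMA mapping sit on rxq->rx_used; buffers that are mapped and
 * ready to hand to the device sit on rxq->rx_free.
 *
 * iwl_pcie_rxq_alloc_rbs() allocates and maps pages, moving buffers from
 * rx_used to rx_free.  iwl_pcie_rxq_restock() moves buffers from rx_free
 * into the circular buffer and advances the device's write pointer (in
 * multiples of 8).  iwl_pcie_rx_handle() drains the buffers the device has
 * filled and recycles them.  Whenever the free pool drops to
 * RX_LOW_WATERMARK or below, the rx_replenish work is scheduled so the
 * pool can be refilled with GFP_KERNEL allocations.
 */

/*
 * iwl_rxq_space - Return number of free slots available in the queue
 */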
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure RX_QUEUE_SIZE is a power of 2 */
	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid
	 * ambiguity between a full and an empty queue.  The masking below
	 * is equivalent to modulo by RX_QUEUE_SIZE and is well defined
	 * for negative differences.
	 */
	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
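
/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to the format used by
 * the RX Buffer Descriptors: the 256-byte-aligned page address shifted
 * right by 8 bits.
 */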
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
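
/*
 * iwl_pcie_rx_stop - stop the Rx DMA channel and wait for it to go idle
 */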
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
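
/*
 * iwl_pcie_rxq_inc_wr_ptr - update the device's RX queue write pointer
 */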
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * Explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	spin_lock(&rxq->lock);

	if (!rxq->need_update)
		goto exit_unlock;

	iwl_pcie_rxq_inc_wr_ptr(trans);
	rxq->need_update = false;

 exit_unlock:
	spin_unlock(&rxq->lock);
}
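
/*
 * iwl_pcie_rxq_restock - refill the RX queue from the pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked and we have
 * free pre-allocated buffers on rx_free, fill as many slots as possible.
 * This moves the 'write' index forward and updates the RBDs so the device
 * can reference the new target buffers.
 */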
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled, there is no need to restock.
	 * This can happen when we stop the device and still have an
	 * interrupt pending that would restock the queue.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If the pre-allocated buffer pool is dropping low, schedule to refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment the device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans);
		spin_unlock(&rxq->lock);
	}
}
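
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been handed to the stack.  To use it
 * again a page must be allocated, DMA-mapped, and attached to the buffer.
 * This function does not touch the hardware pointers; it only moves
 * buffers from rx_used to rx_free for iwl_pcie_rxq_restock() to pick up.
 * 'priority' (GFP_ATOMIC or GFP_KERNEL) selects whether the allocation
 * may sleep.
 */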
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans,
					 "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/*
			 * Don't reschedule the replenish work here; the
			 * restock path will schedule it again if buffers
			 * are still needed.
			 */
			return;
		}

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}
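
/*
 * iwl_pcie_rx_replenish - allocate pages and hand them to the device
 *
 * Called from the init path and the replenish work with GFP_KERNEL, and
 * from the RX handling path with GFP_ATOMIC: allocate and map new receive
 * buffers, then restock the device's circular buffer with them.
 */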
static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
	iwl_pcie_rxq_alloc_rbs(trans, gfp);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
		container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the receive buffer status area written by the device */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/*
	 * Enable Rx DMA, direct interrupts to the host, and program the
	 * RB size, the RB timeout and the (log2) number of RBDs in the
	 * circular buffer.
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to the default value */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* Some devices also need the coalescing operation mode bit set */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock(&rxq->lock);

	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/*
	 * Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers
	 */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock(&rxq->lock);

	iwl_pcie_rx_replenish(trans, GFP_KERNEL);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans);
	spin_unlock(&rxq->lock);

	return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;

	/*
	 * If rxq->bd is NULL, nothing has been allocated,
	 * so there is nothing to free.
	 */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock(&rxq->lock);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			     rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd);

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/*
		 * Reclaim a command buffer only if this packet is a response
		 * to a (driver-originated) command.  If the packet (e.g. an
		 * Rx frame) originated from uCode inside the receiver, there
		 * is no command buffer to reclaim.  uCode should set the
		 * SEQ_RX_FRAME bit when it originates the packet, but a few
		 * don't get set; catch them here.
		 */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
				    pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After this point, always check rxcb._page_stolen: if it is
		 * true then one of the handlers took the page.
		 */

		if (reclaim) {
			/*
			 * Invoke any callbacks, transfer the buffer to the
			 * caller, and fire off the (possibly) blocking
			 * command completion as we reclaim the driver
			 * command queue.
			 */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/*
	 * Reuse the page if possible.  If the page could not be re-mapped
	 * (or was stolen), put the buffer back into the used pool so a new
	 * page is allocated for it later.
	 */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/* free the page and recycle the buffer via rx_used */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
}
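
/*
 * iwl_pcie_rx_handle - main entry function for receiving responses from fw
 */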
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

restart:
	spin_lock(&rxq->lock);

	/*
	 * uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that was filled by uCode
	 */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate how many buffers need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;

		/*
		 * If there are a lot of unused frames, restock the Rx queue
		 * periodically while draining so uCode won't run dry.
		 */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
				count = 0;
				goto restart;
			}
		}
	}

	/* record how far we have processed */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	if (fill_rx)
		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
	else
		iwl_pcie_rxq_restock(trans);

	if (trans_pcie->napi.poll)
		napi_gro_flush(&trans_pcie->napi, false);
}
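
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */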
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/*
	 * W/A for WiFi/WiMAX coex: when WiMAX owns the RF, notify the
	 * op_mode instead of triggering a firmware error restart.
	 */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/*
	 * iwl_trans_fw_error() sets STATUS_FW_ERROR; this must happen before
	 * we wake up the command queue waiters below so that any waiting
	 * command sees the error state.
	 */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* The ICT (interrupt cause table) occupies one 4 kB DMA page */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
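
/*
 * iwl_pcie_int_cause_ict - read the interrupt cause from the ICT table
 *
 * Instead of reading the (expensive) CSR_INT register, the device writes
 * its interrupt causes into the ICT table in DRAM; the driver reads and
 * clears entries starting at ict_index until it hits a zero entry.
 */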
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug.  The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing.  Fortunately, bits 18 and 19 stay set when this
	 * happens, so we use them to decide on the real state of the Rx
	 * bit: bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/*
	 * Read the interrupt causes from the ICT table if it is in use,
	 * otherwise fall back to reading CSR_INT directly.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't have anything to
		 * service, but only if they were not disabled elsewhere.
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared.  It might have already raised
		 * an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/*
	 * Ack/clear/reset pending uCode interrupts.  We ack the causes we
	 * are about to service as well as any bits we are not interested
	 * in.  Note: some bits in CSR_INT are "OR" of bits in
	 * CSR_FH_INT_STATUS and clear only when that register is serviced
	 * in the per-cause handlers below.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/*
	 * All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notifications), and other
	 * notifications from uCode come through here.
	 */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable the periodic interrupt; it is used as a one-shot */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		/*
		 * Re-enable the periodic interrupt only if we received a
		 * real RX interrupt (instead of just the periodic one), to
		 * catch any dangling Rx activity.  If it was just the
		 * periodic interrupt, there was no dangling Rx activity and
		 * the one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts, but only if they were enabled before */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
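
/*
 * ICT (interrupt cause table) handling
 */

/* Free the DRAM-based interrupt cause table */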
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
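
/*
 * Allocate the DRAM-shared ICT table: an ICT_SIZE-aligned block of
 * ICT_SIZE bytes.
 */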
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				    &trans_pcie->ict_tbl_dma,
				    GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma,
		      trans_pcie->ict_tbl);

	return 0;
}
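
/*
 * Device is going up: tell it to start using the ICT interrupt table,
 * and re-enable interrupts.
 */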
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
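
/* Device is going down: stop using the ICT table */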
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/*
	 * Disable (but don't clear!) interrupts here to avoid back-to-back
	 * ISRs and sporadic interrupts from our NIC.  If we have something
	 * to service, the threaded irq handler will re-enable interrupts;
	 * if we don't, it re-enables them before returning.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}