1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "common.h"
54#include "4965.h"
55
56
57
58
59
60
61
62
63
64
65#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
66
67#ifdef CONFIG_IWLEGACY_DEBUG
68#define VD "d"
69#else
70#define VD
71#endif
72
73#define DRV_VERSION IWLWIFI_VERSION VD
74
75MODULE_DESCRIPTION(DRV_DESCRIPTION);
76MODULE_VERSION(DRV_VERSION);
77MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
78MODULE_LICENSE("GPL");
79MODULE_ALIAS("iwl4965");
80
81void
82il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
83{
84 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
85 IL_ERR("Tx flush command to flush out all frames\n");
86 if (!test_bit(S_EXIT_PENDING, &il->status))
87 queue_work(il->workqueue, &il->tx_flush);
88 }
89}
90
91
92
93
/* Default module parameters: 8K A-MSDU RX buffers enabled and firmware
 * auto-restart on error enabled.  All other fields keep their zero
 * defaults. */
struct il_mod_params il4965_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,

};
99
/*
 * il4965_rx_queue_reset - return all RX buffers to the rx_used list
 *
 * Unmaps and frees any page still attached to a pool buffer, empties the
 * RBD queue, and resets the read/write indices so the queue can be
 * restocked from scratch.  Everything runs under rxq->lock.
 */
void
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Fill the rx_used list with all of the pool buffers. */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* A buffer may still hold a DMA-mapped page from a prior
		 * run; unmap and free it before recycling the buffer. */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* No buffer is currently attached to any RBD slot. */
	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Indices back to empty: all buffers used, none restocked yet. */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
132
/*
 * il4965_rx_init - program the FH RX DMA channel registers
 *
 * Points the device at the RBD ring and status area and enables RX DMA
 * with the receive-buffer size selected by the amsdu_size_8K module
 * parameter.  Always returns 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* log2 of the RBD ring size */
	u32 rb_timeout = 0;	/* no RBD-threshold interrupt timeout */

	/* Receive-buffer size follows the amsdu_size_8K module parameter. */
	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop RX DMA before reprogramming the channel. */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset the driver's RBD write index in the device. */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell the device where the RBD circular buffer lives (addr >> 8). */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell the device where to write its RX status (addr >> 4). */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/* Enable RX DMA: interrupt the host on frame, single-frame mode,
	 * with the chosen buffer size, timeout and ring-size fields. */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to its default value. */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
176
/*
 * il4965_set_pwr_vmain - select VMAIN as the device power source
 *
 * Writes the PWR_SRC_VMAIN value into the PWR_SRC field of
 * APMG_PS_CTRL_REG (set-bits-under-mask on a PRPH register).
 */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
194
/*
 * il4965_hw_nic_init - bring up device power, RX and TX machinery
 *
 * Runs APM init and basic NIC config, then allocates the RX queue and
 * TX queue contexts on first call (or resets them on later calls),
 * primes the RX queue with buffers, and marks the driver initialized.
 * Returns 0 on success or a negative errno on allocation failure.
 */
int
il4965_hw_nic_init(struct il_priv *il)
{
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;
	int ret;

	/* Basic device init under the device lock. */
	spin_lock_irqsave(&il->lock, flags);
	il_apm_init(il);
	/* Interrupt coalescing timer: calibration default. */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_set_pwr_vmain(il);
	il4965_nic_config(il);

	/* Allocate the RX queue on first init, otherwise just reset it. */
	if (!rxq->bd) {
		ret = il_rx_queue_alloc(il);
		if (ret) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il4965_rx_queue_reset(il, rxq);

	il4965_rx_replenish(il);

	il4965_rx_init(il, rxq);

	spin_lock_irqsave(&il->lock, flags);

	/* Push the RBD write pointer to the device. */
	rxq->need_update = 1;
	il_rx_queue_update_write_ptr(il, rxq);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Allocate TX queue contexts on first init, otherwise reset them. */
	if (!il->txq) {
		ret = il4965_txq_ctx_alloc(il);
		if (ret)
			return ret;
	} else
		il4965_txq_ctx_reset(il);

	set_bit(S_INIT, &il->status);

	return 0;
}
244
245
246
247
248static inline __le32
249il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
250{
251 return cpu_to_le32((u32) (dma_addr >> 8));
252}
253
254
255
256
257
258
259
260
261
262
263
264
/*
 * il4965_rx_queue_restock - move buffers from rx_free into the RBD ring
 *
 * Under rxq->lock, attaches free buffers to empty RBD slots until the
 * ring is full or rx_free is empty.  Schedules the replenish worker if
 * the free pool runs low, and updates the device write pointer when at
 * least 8 new slots have been filled.
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The slot being overwritten must not still hold a page. */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Take the next free buffer off the free list. */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point the next RBD in the circular buffer at it. */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	/* If the free pool is dropping low, schedule more allocation. */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* Tell the device about new space; its write pointer is only
	 * advanced in multiples of 8 slots. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
306
307
308
309
310
311
312
313
314
/*
 * il4965_rx_allocate - allocate and DMA-map pages for all used RX buffers
 *
 * @priority: base GFP flags (GFP_KERNEL from process context,
 *            GFP_ATOMIC from the RX path).
 *
 * Loops until rxq->rx_used is empty, allocating one page per buffer,
 * mapping it for device DMA, and moving the buffer to rx_free.  The
 * allocation itself is done without holding rxq->lock, so the used list
 * is re-checked under the lock before the buffer is claimed.
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	dma_addr_t page_dma;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		/* Done when there is no empty buffer left to fill. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Plenty of buffers left: allocation failures need not
		 * trigger the allocator's warning. */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		/* Multi-page buffers must be compound pages. */
		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Allocate a new receive buffer page. */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);

			/* Give up; restock will reschedule replenish if
			 * more buffers are still needed. */
			return;
		}

		/* Map the page for device DMA. */
		page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
			__free_pages(page, il->hw_params.rx_page_order);
			break;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* Re-check under the lock: rx_used may have been drained
		 * while the page was being allocated and mapped. */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			pci_unmap_page(il->pci_dev, page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}

		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		BUG_ON(rxb->page);

		rxb->page = page;
		rxb->page_dma = page_dma;
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
396
397void
398il4965_rx_replenish(struct il_priv *il)
399{
400 unsigned long flags;
401
402 il4965_rx_allocate(il, GFP_KERNEL);
403
404 spin_lock_irqsave(&il->lock, flags);
405 il4965_rx_queue_restock(il);
406 spin_unlock_irqrestore(&il->lock, flags);
407}
408
/*
 * il4965_rx_replenish_now - atomic-context variant of il4965_rx_replenish
 *
 * Uses GFP_ATOMIC for the page allocations and restocks without taking
 * il->lock.
 */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
416
417
418
419
420
421
/*
 * il4965_rx_queue_free - release all RX queue resources
 *
 * Unmaps and frees every page still held in the buffer pool, then frees
 * the coherent RBD ring (4 bytes per descriptor) and the RB status area.
 */
void
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
443
444int
445il4965_rxq_stop(struct il_priv *il)
446{
447 int ret;
448
449 _il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
450 ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
451 FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
452 FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
453 1000);
454 if (ret < 0)
455 IL_ERR("Can't stop Rx DMA.\n");
456
457 return 0;
458}
459
460int
461il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
462{
463 int idx = 0;
464 int band_offset = 0;
465
466
467 if (rate_n_flags & RATE_MCS_HT_MSK) {
468 idx = (rate_n_flags & 0xff);
469 return idx;
470
471 } else {
472 if (band == IEEE80211_BAND_5GHZ)
473 band_offset = IL_FIRST_OFDM_RATE;
474 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
475 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
476 return idx - band_offset;
477 }
478
479 return -1;
480}
481
/*
 * il4965_calc_rssi - derive the received signal strength (dBm)
 *
 * Takes the maximum RSSI reading over the valid antennas and subtracts
 * the DSP's AGC attenuation and a constant offset.
 */
static int
il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
{
	/* Non-config PHY data reported by the DSP. */
	struct il4965_rx_non_cfg_phy *ncphy =
	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	/* AGC gain (dB field) applied by the DSP. */
	u32 agc =
	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
	    IL49_AGC_DB_POS;

	/* Bitmap of antennas that produced a valid reading. */
	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* rssi_info holds one reading per antenna at even offsets
	 * (0, 2, 4); take the max over the valid ones. */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max reading minus AGC gain minus constant offset. */
	return max_rssi - agc - IL4965_RSSI_OFFSET;
}
516
/*
 * il4965_translate_rx_status - convert MPDU decrypt status bits
 *
 * Translates the per-MPDU decryption status into the RX_RES_STATUS_*
 * format used for regular RX frames, checking MIC/TTAK/ICV results per
 * security type.
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* No security in use: nothing further to translate. */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* Security type reported as error: leave status as-is. */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* Decryption not yet finished: leave status as-is. */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* CCMP: check the MIC result only. */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through: TTAK OK, check ICV as in the default case */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);

	return decrypt_out;
}
576
/*
 * il4965_pass_packet_to_mac80211 - hand a received frame to the stack
 *
 * Builds a small skb whose payload is attached as a page fragment from
 * the RX buffer, copies the RX status into the skb control block, and
 * delivers it via ieee80211_rx().  On success the page ownership moves
 * to the skb (rxb->page is cleared).  Frames are dropped if the
 * interface is closed or hardware decryption failed.
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* Only process packets while the interface is open. */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* A frame arrived on a passive channel: wake any queues that were
	 * stopped for that reason. */
	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Woke queues - frame received on passive channel\n");
	}

	/* With HW crypto, drop frames whose decryption status is bad. */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	/* Small skb header; the frame payload rides as a page fragment. */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len,
			len);

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	/* Page ownership has passed to the skb; stop tracking it here. */
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
617
618
619
/*
 * il4965_hdl_rx - handle a received frame notification
 *
 * An N_RX packet carries its own PHY result ahead of the frame; any
 * other (MPDU) packet carries only the frame and relies on the PHY
 * result cached earlier by il4965_hdl_rx_phy().  Fills in an
 * ieee80211_rx_status from the PHY data and passes the frame up.
 */
static void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status = {};
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct il_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	if (pkt->hdr.cmd == N_RX) {
		/* PHY result is embedded; the frame follows it and the
		 * status word follows the frame payload. */
		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
		header =
		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
					     phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		rx_pkt_status =
		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
				 phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		/* MPDU packet: use the cached PHY result. */
		if (!il->_4965.last_phy_res_valid) {
			IL_ERR("MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &il->_4965.last_phy_res;
		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
		/* MPDU status uses a different bit layout; translate it. */
		ampdu_status =
		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		D_DROP("dsp size out of range [0,20]: %d/n",
		       phy_res->cfg_phy_cnt);
		return;
	}

	/* Drop frames with a bad CRC or an RX FIFO overflow. */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
		return;
	}

	/* Used several times below. */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries the frame metadata for mac80211. */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band =
	    (phy_res->
	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
	    IEEE80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					   rx_status.band);
	rx_status.rate_idx =
	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* Cache the uCode beacon timestamp for later use. */
	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Signal strength: max over the antenna chains, in dBm. */
	rx_status.signal = il4965_calc_rssi(il, phy_res);

	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
		(unsigned long long)rx_status.mactime);

	/* Report the antenna bits straight from the PHY flags. */
	rx_status.antenna =
	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
	    RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* Short preamble flag. */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* HT PHY flags. */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
		/* Part of an aggregation: tag the frame with the current
		 * A-MPDU reference so mac80211 can group subframes. */
		rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status.ampdu_reference = il->_4965.ampdu_ref;
	}

	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
				       &rx_status);
}
749
750
751
/*
 * il4965_hdl_rx_phy - cache PHY data for the MPDU frames that follow
 *
 * Marks the cached PHY result valid, bumps the A-MPDU reference counter,
 * and stores the PHY result for use by il4965_hdl_rx().
 */
static void
il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	il->_4965.last_phy_res_valid = true;
	il->_4965.ampdu_ref++;
	memcpy(&il->_4965.last_phy_res, pkt->u.raw,
	       sizeof(struct il_rx_phy_res));
}
761
/*
 * il4965_get_channels_for_scan - build the scan command's channel list
 *
 * Fills scan_ch entries for every requested channel on @band, choosing
 * active or passive scanning per channel and setting dwell times and TX
 * gain values.  Returns the number of channels added.
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum ieee80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Passive dwell must always exceed active dwell. */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		/* Scan passively unless an active scan was requested and
		 * the channel allows active scanning. */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* DSP attenuation default. */
		scan_ch->dsp_atten = 110;

		/* NOTE(review): tx_gain bit-field encoding below is
		 * hardware-specific; values kept as in the original. */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
840
841static void
842il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
843{
844 int i;
845 u8 ind = *ant;
846
847 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
848 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
849 if (valid & BIT(ind)) {
850 *ant = ind;
851 return;
852 }
853 }
854}
855
/*
 * il4965_request_scan - build and send the C_SCAN host command
 *
 * Assembles the full scan command (dwell/suspend times, direct-scan
 * SSIDs, probe-request TX parameters, RX chain selection and channel
 * list) into the lazily allocated il->scan_cmd buffer and sends it
 * synchronously.  Must be called with il->mutex held.  Returns 0 on
 * success or a negative errno.
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&il->mutex);

	/* Scan command buffer is allocated once and reused. */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* Encode the suspend time as whole beacon intervals in
		 * bits 22+ plus the remainder scaled by 1024. */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* Skip empty SSIDs (wildcard is always probed). */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Pick the probe-request TX rate per band; CCK 1M on 2.4 GHz
	 * unless the channel mode is pure-40MHz. */
	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/* Require a good CRC threshold only for active scans. */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	band = il->scan_band;

	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];

	/* Rotate the TX antenna used for probe requests. */
	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);

	/* In power-save, receive on a single chain. */
	if (test_bit(S_POWER_PMI, &il->status)) {

		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);

		rx_ant = il4965_first_antenna(active_chains);
	}

	/* Build the RX chain selection word. */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	/* Probe request (with extra IEs) goes first in scan->data. */
	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);

	/* Channel list follows the probe request in scan->data. */
	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);

	return ret;
}
1044
1045int
1046il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
1047 bool add)
1048{
1049 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1050
1051 if (add)
1052 return il4965_add_bssid_station(il, vif->bss_conf.bssid,
1053 &vif_priv->ibss_bssid_sta_id);
1054 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1055 vif->bss_conf.bssid);
1056}
1057
/*
 * il4965_free_tfds_in_queue - account for freed TX frame descriptors
 *
 * Decrements the per-station, per-TID count of TFDs queued in hardware,
 * clamping at zero (with a debug message) if it would underflow.
 * Caller must hold il->sta_lock.
 */
void
il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
{
	lockdep_assert_held(&il->sta_lock);

	if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		D_TX("free more than tfds_in_queue (%u:%d)\n",
		     il->stations[sta_id].tid[tid].tfds_in_queue, freed);
		il->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}
1071
1072#define IL_TX_QUEUE_MSK 0xfffff
1073
1074static bool
1075il4965_is_single_rx_stream(struct il_priv *il)
1076{
1077 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1078 il->current_ht_config.single_chain_sufficient;
1079}
1080
1081#define IL_NUM_RX_CHAINS_MULTIPLE 3
1082#define IL_NUM_RX_CHAINS_SINGLE 2
1083#define IL_NUM_IDLE_CHAINS_DUAL 2
1084#define IL_NUM_IDLE_CHAINS_SINGLE 1
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096static int
1097il4965_get_active_rx_chain_count(struct il_priv *il)
1098{
1099
1100 if (il4965_is_single_rx_stream(il))
1101 return IL_NUM_RX_CHAINS_SINGLE;
1102 else
1103 return IL_NUM_RX_CHAINS_MULTIPLE;
1104}
1105
1106
1107
1108
1109
/*
 * il4965_get_idle_rx_chain_count - receivers to keep active when idle
 *
 * Static/dynamic SMPS drop to a single chain when idle; SMPS off keeps
 * all active chains.  Unknown SMPS modes warn and keep @active_cnt.
 */
static int
il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
{
	switch (il->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
		return active_cnt;
	}
}
1125
1126
1127static u8
1128il4965_count_chain_bitmap(u32 chain_bitmap)
1129{
1130 u8 res;
1131 res = (chain_bitmap & BIT(0)) >> 0;
1132 res += (chain_bitmap & BIT(1)) >> 1;
1133 res += (chain_bitmap & BIT(2)) >> 2;
1134 res += (chain_bitmap & BIT(3)) >> 3;
1135 return res;
1136}
1137
1138
1139
1140
1141
1142
1143
/*
 * il4965_set_rxon_chain - program the staging RXON RX chain selection
 *
 * Chooses valid/active/idle chain counts from the chain-noise data, the
 * SMPS configuration, and the power-management state, and writes the
 * resulting rx_chain word (plus the MIMO-force flag) into il->staging.
 */
void
il4965_set_rxon_chain(struct il_priv *il)
{
	bool is_single = il4965_is_single_rx_stream(il);
	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Prefer the chain bitmap from the chain-noise data when it is
	 * available; fall back to all chains the HW declares valid. */
	if (il->chain_noise_data.active_chains)
		active_chains = il->chain_noise_data.active_chains;
	else
		active_chains = il->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should be active, and how many when idle? */
	active_rx_cnt = il4965_get_active_rx_chain_count(il);
	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);

	/* Never use more receive chains than are actually valid. */
	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	il->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO only for multi-stream configs with at least two
	 * active chains while not in power-save (CAM). */
	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
		il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
		active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1194
/*
 * il4965_get_fh_string - map an FH register address to its name
 *
 * Uses the IL_CMD stringify macro per register; unknown addresses map
 * to "UNKNOWN".
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
	IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
	IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
	IL_CMD(FH49_RSCSR_CHNL0_WPTR);
	IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
	IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
	IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
	IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IL_CMD(FH49_TSSR_TX_STATUS_REG);
	IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1212
/*
 * il4965_dump_fh - dump the FH register set for debugging
 *
 * With @display set (debug builds only), formats the registers into a
 * freshly kmalloc'ed buffer returned via *buf (caller frees) and
 * returns the number of characters written (or -ENOMEM).  Otherwise the
 * registers are dumped to the kernel log and 0 is returned.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* 48 bytes per register line plus header slack. */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      " %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	/* Non-display path: dump to the kernel log. */
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
1257
1258static void
1259il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
1260{
1261 struct il_rx_pkt *pkt = rxb_addr(rxb);
1262 struct il_missed_beacon_notif *missed_beacon;
1263
1264 missed_beacon = &pkt->u.missed_beacon;
1265 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
1266 il->missed_beacon_threshold) {
1267 D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
1268 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
1269 le32_to_cpu(missed_beacon->total_missed_becons),
1270 le32_to_cpu(missed_beacon->num_recvd_beacons),
1271 le32_to_cpu(missed_beacon->num_expected_beacons));
1272 if (!test_bit(S_SCANNING, &il->status))
1273 il4965_init_sensitivity(il);
1274 }
1275}
1276
1277
1278
1279
1280static void
1281il4965_rx_calc_noise(struct il_priv *il)
1282{
1283 struct stats_rx_non_phy *rx_info;
1284 int num_active_rx = 0;
1285 int total_silence = 0;
1286 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
1287 int last_rx_noise;
1288
1289 rx_info = &(il->_4965.stats.rx.general);
1290 bcn_silence_a =
1291 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
1292 bcn_silence_b =
1293 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
1294 bcn_silence_c =
1295 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
1296
1297 if (bcn_silence_a) {
1298 total_silence += bcn_silence_a;
1299 num_active_rx++;
1300 }
1301 if (bcn_silence_b) {
1302 total_silence += bcn_silence_b;
1303 num_active_rx++;
1304 }
1305 if (bcn_silence_c) {
1306 total_silence += bcn_silence_c;
1307 num_active_rx++;
1308 }
1309
1310
1311 if (num_active_rx)
1312 last_rx_noise = (total_silence / num_active_rx) - 107;
1313 else
1314 last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
1315
1316 D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
1317 bcn_silence_b, bcn_silence_c, last_rx_noise);
1318}
1319
#ifdef CONFIG_IWLEGACY_DEBUGFS

/*
 * il4965_accumulative_stats - maintain debugfs statistics accumulators.
 *
 * Walks the raw statistics notification (@stats) dword by dword, skipping
 * the leading flags word, and for every counter that increased since the
 * previous notification updates the running total, the last delta and the
 * maximum delta observed.  Counters that went backwards (firmware counter
 * reset) are left untouched for this round.
 *
 * Fix: dropped the set-but-never-used locals 'tx' and 'accum_tx'
 * (struct stats_tx pointers) that the original declared and assigned.
 */
static void
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct stats_general_common *general, *accum_general;

	/* The stats/accum/delta/max blocks share one memory layout, so
	 * they can be walked in lock-step as flat arrays of 32-bit words. */
	prev_stats = (__le32 *) &il->_4965.stats;
	accum_stats = (u32 *) &il->_4965.accum_stats;
	size = sizeof(struct il_notif_stats);
	general = &il->_4965.stats.general.common;
	accum_general = &il->_4965.accum_stats.general.common;
	delta = (u32 *) &il->_4965.delta_stats;
	max_delta = (u32 *) &il->_4965.max_delta;

	/* Start at sizeof(__le32) to skip the 'flag' word at offset 0. */
	for (i = sizeof(__le32); i < size;
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* Non-counter values are mirrored, not accumulated. */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
#endif
1364
/*
 * il4965_hdl_stats - handler for statistics notifications.
 *
 * Copies the firmware statistics into the driver state, re-arms the
 * periodic statistics timer and, outside of a scan, refreshes the noise
 * estimate and schedules run-time calibration work.  A temperature or
 * HT40-mode change additionally triggers temperature calibration.
 */
static void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	const int recalib_seconds = 60;
	bool change;
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

	/* Detect operating-condition changes before the stored statistics
	 * are overwritten by the memcpy() below. */
	change =
	    ((il->_4965.stats.general.common.temperature !=
	      pkt->u.stats.general.common.temperature) ||
	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

	/* Snapshot the new statistics as the baseline for the next delta. */
	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

	set_bit(S_STATS, &il->status);

	/*
	 * Re-arm the periodic statistics timer so a recalibration happens
	 * in recalib_seconds even if no further notification arrives.
	 */
	mod_timer(&il->stats_periodic,
		  jiffies + msecs_to_jiffies(recalib_seconds * 1000));

	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    (pkt->hdr.cmd == N_STATS)) {
		il4965_rx_calc_noise(il);
		queue_work(il->workqueue, &il->run_time_calib_work);
	}

	if (change)
		il4965_temperature_calib(il);
}
1406
1407static void
1408il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
1409{
1410 struct il_rx_pkt *pkt = rxb_addr(rxb);
1411
1412 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
1413#ifdef CONFIG_IWLEGACY_DEBUGFS
1414 memset(&il->_4965.accum_stats, 0,
1415 sizeof(struct il_notif_stats));
1416 memset(&il->_4965.delta_stats, 0,
1417 sizeof(struct il_notif_stats));
1418 memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
1419#endif
1420 D_RX("Statistics have been cleared\n");
1421 }
1422 il4965_hdl_stats(il, rxb);
1423}
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
/* Map an IEEE 802.11 TID (0-7) to its mac80211 access category. */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO
};
1462
1463static inline int
1464il4965_get_ac_from_tid(u16 tid)
1465{
1466 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1467 return tid_to_ac[tid];
1468
1469
1470 return -EINVAL;
1471}
1472
1473static inline int
1474il4965_get_fifo_from_tid(u16 tid)
1475{
1476 const u8 ac_to_fifo[] = {
1477 IL_TX_FIFO_VO,
1478 IL_TX_FIFO_VI,
1479 IL_TX_FIFO_BE,
1480 IL_TX_FIFO_BK,
1481 };
1482
1483 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1484 return ac_to_fifo[tid_to_ac[tid]];
1485
1486
1487 return -EINVAL;
1488}
1489
1490
1491
1492
/*
 * il4965_tx_cmd_build_basic - fill the frame-independent part of a Tx
 * command: ACK/sequence-control/TSF flags from the frame type, the QoS
 * TID when present, RTS/CTS protection and the power-management frame
 * timeout.  @std_id is the destination station's table index.
 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
			  struct il_tx_cmd *tx_cmd,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* Only the first fragment of a probe response gets the TSF */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		/* For QoS data the uCode manages the sequence number per TID */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	il_tx_cmd_protection(il, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* Short PM timeouts: 3 TU for (re)assoc requests, 2 TU for
		 * other management frames. */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
1546
/*
 * il4965_tx_cmd_build_rate - choose rate/antenna for a Tx command.
 *
 * Data frames delegate rate selection to the uCode station table
 * (TX_CMD_FLG_STA_RATE_MSK); management/control frames use the rate
 * picked by mac80211 (with a lowest-rate fallback) and alternate the
 * Tx antenna.
 */
static void
il4965_tx_cmd_build_rate(struct il_priv *il,
			 struct il_tx_cmd *tx_cmd,
			 struct ieee80211_tx_info *info,
			 struct ieee80211_sta *sta,
			 __le16 fc)
{
	const u8 rts_retry_limit = 60;
	u32 rate_flags;
	int rate_idx;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Probe responses get few retries; everything else the default */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IL4965_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;
	/* RTS retries never exceed the data retry limit */
	tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_idx = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the rate mac80211 chose has the MCS bit set it is not a legacy
	 * rate; fall back to the lowest supported rate for this band.  The
	 * same fallback applies to an invalid stored rate index.
	 */
	rate_idx = info->control.rates[0].idx;
	/* NOTE(review): this bound accepts rate_idx == RATE_COUNT_LEGACY;
	 * '>=' looks intended -- confirm against the size of il_rates[]. */
	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
	    || rate_idx > RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(&il->bands[info->band], sta);

	/* 5 GHz: remap mac80211 rate indices into driver (OFDM) indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IL_FIRST_OFDM_RATE;

	/* PLCP encoding of the chosen rate for tx_cmd->rate_n_flags */
	rate_plcp = il_rates[rate_idx].plcp;

	rate_flags = 0;

	/* Flag CCK rates so the hardware uses the right modulation */
	if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Alternate the management-frame Tx antenna among the valid ones */
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;

	/* Combine rate and antenna/modulation flags into the command */
	tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
}
1606
/*
 * il4965_tx_cmd_build_hwcrypto - program hardware encryption for a frame.
 *
 * Copies key material and security-control bits for the cipher of the
 * frame's hw_key into @tx_cmd.  TKIP phase-2 keys are derived per packet;
 * WEP places the key at offset 3 and encodes the key index.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
			     int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		D_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through - WEP104 shares the WEP40 key setup below */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
		     TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     keyconf->keyidx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
1648
1649
1650
1651
1652int
1653il4965_tx_skb(struct il_priv *il,
1654 struct ieee80211_sta *sta,
1655 struct sk_buff *skb)
1656{
1657 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1658 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1659 struct il_station_priv *sta_priv = NULL;
1660 struct il_tx_queue *txq;
1661 struct il_queue *q;
1662 struct il_device_cmd *out_cmd;
1663 struct il_cmd_meta *out_meta;
1664 struct il_tx_cmd *tx_cmd;
1665 int txq_id;
1666 dma_addr_t phys_addr;
1667 dma_addr_t txcmd_phys;
1668 dma_addr_t scratch_phys;
1669 u16 len, firstlen, secondlen;
1670 u16 seq_number = 0;
1671 __le16 fc;
1672 u8 hdr_len;
1673 u8 sta_id;
1674 u8 wait_write_ptr = 0;
1675 u8 tid = 0;
1676 u8 *qc = NULL;
1677 unsigned long flags;
1678 bool is_agg = false;
1679
1680 spin_lock_irqsave(&il->lock, flags);
1681 if (il_is_rfkill(il)) {
1682 D_DROP("Dropping - RF KILL\n");
1683 goto drop_unlock;
1684 }
1685
1686 fc = hdr->frame_control;
1687
1688#ifdef CONFIG_IWLEGACY_DEBUG
1689 if (ieee80211_is_auth(fc))
1690 D_TX("Sending AUTH frame\n");
1691 else if (ieee80211_is_assoc_req(fc))
1692 D_TX("Sending ASSOC frame\n");
1693 else if (ieee80211_is_reassoc_req(fc))
1694 D_TX("Sending REASSOC frame\n");
1695#endif
1696
1697 hdr_len = ieee80211_hdrlen(fc);
1698
1699
1700 if (!ieee80211_is_data(fc))
1701 sta_id = il->hw_params.bcast_id;
1702 else {
1703
1704 sta_id = il_sta_id_or_broadcast(il, sta);
1705
1706 if (sta_id == IL_INVALID_STATION) {
1707 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
1708 goto drop_unlock;
1709 }
1710 }
1711
1712 D_TX("station Id %d\n", sta_id);
1713
1714 if (sta)
1715 sta_priv = (void *)sta->drv_priv;
1716
1717 if (sta_priv && sta_priv->asleep &&
1718 (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728 il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
1729 }
1730
1731
1732 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1733
1734
1735 txq_id = skb_get_queue_mapping(skb);
1736
1737
1738 spin_lock(&il->sta_lock);
1739
1740 if (ieee80211_is_data_qos(fc)) {
1741 qc = ieee80211_get_qos_ctl(hdr);
1742 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1743 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
1744 spin_unlock(&il->sta_lock);
1745 goto drop_unlock;
1746 }
1747 seq_number = il->stations[sta_id].tid[tid].seq_number;
1748 seq_number &= IEEE80211_SCTL_SEQ;
1749 hdr->seq_ctrl =
1750 hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
1751 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1752 seq_number += 0x10;
1753
1754 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1755 il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
1756 txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
1757 is_agg = true;
1758 }
1759 }
1760
1761 txq = &il->txq[txq_id];
1762 q = &txq->q;
1763
1764 if (unlikely(il_queue_space(q) < q->high_mark)) {
1765 spin_unlock(&il->sta_lock);
1766 goto drop_unlock;
1767 }
1768
1769 if (ieee80211_is_data_qos(fc)) {
1770 il->stations[sta_id].tid[tid].tfds_in_queue++;
1771 if (!ieee80211_has_morefrags(fc))
1772 il->stations[sta_id].tid[tid].seq_number = seq_number;
1773 }
1774
1775 spin_unlock(&il->sta_lock);
1776
1777 txq->skbs[q->write_ptr] = skb;
1778
1779
1780 out_cmd = txq->cmd[q->write_ptr];
1781 out_meta = &txq->meta[q->write_ptr];
1782 tx_cmd = &out_cmd->cmd.tx;
1783 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1784 memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
1785
1786
1787
1788
1789
1790
1791
1792 out_cmd->hdr.cmd = C_TX;
1793 out_cmd->hdr.sequence =
1794 cpu_to_le16((u16)
1795 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
1796
1797
1798 memcpy(tx_cmd->hdr, hdr, hdr_len);
1799
1800
1801 tx_cmd->len = cpu_to_le16((u16) skb->len);
1802
1803 if (info->control.hw_key)
1804 il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
1805
1806
1807 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1808
1809 il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820 len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
1821 firstlen = (len + 3) & ~3;
1822
1823
1824 if (firstlen != len)
1825 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1826
1827
1828
1829 txcmd_phys =
1830 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
1831 PCI_DMA_BIDIRECTIONAL);
1832 if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
1833 goto drop_unlock;
1834
1835
1836
1837 secondlen = skb->len - hdr_len;
1838 if (secondlen > 0) {
1839 phys_addr =
1840 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
1841 PCI_DMA_TODEVICE);
1842 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
1843 goto drop_unlock;
1844 }
1845
1846
1847
1848 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
1849 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1850 dma_unmap_len_set(out_meta, len, firstlen);
1851 if (secondlen)
1852 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
1853 0, 0);
1854
1855 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1856 txq->need_update = 1;
1857 } else {
1858 wait_write_ptr = 1;
1859 txq->need_update = 0;
1860 }
1861
1862 scratch_phys =
1863 txcmd_phys + sizeof(struct il_cmd_header) +
1864 offsetof(struct il_tx_cmd, scratch);
1865
1866
1867 pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
1868 PCI_DMA_BIDIRECTIONAL);
1869 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1870 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
1871
1872 il_update_stats(il, true, fc, skb->len);
1873
1874 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
1875 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1876 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
1877 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
1878
1879
1880 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1881 il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
1882
1883 pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
1884 PCI_DMA_BIDIRECTIONAL);
1885
1886
1887 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
1888 il_txq_update_write_ptr(il, txq);
1889 spin_unlock_irqrestore(&il->lock, flags);
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905 if (sta_priv && sta_priv->client && !is_agg)
1906 atomic_inc(&sta_priv->pending_frames);
1907
1908 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
1909 if (wait_write_ptr) {
1910 spin_lock_irqsave(&il->lock, flags);
1911 txq->need_update = 1;
1912 il_txq_update_write_ptr(il, txq);
1913 spin_unlock_irqrestore(&il->lock, flags);
1914 } else {
1915 il_stop_queue(il, txq);
1916 }
1917 }
1918
1919 return 0;
1920
1921drop_unlock:
1922 spin_unlock_irqrestore(&il->lock, flags);
1923 return -1;
1924}
1925
1926static inline int
1927il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1928{
1929 ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
1930 GFP_KERNEL);
1931 if (!ptr->addr)
1932 return -ENOMEM;
1933 ptr->size = size;
1934 return 0;
1935}
1936
1937static inline void
1938il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
1939{
1940 if (unlikely(!ptr->addr))
1941 return;
1942
1943 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1944 memset(ptr, 0, sizeof(*ptr));
1945}
1946
1947
1948
1949
1950
1951
1952void
1953il4965_hw_txq_ctx_free(struct il_priv *il)
1954{
1955 int txq_id;
1956
1957
1958 if (il->txq) {
1959 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1960 if (txq_id == il->cmd_queue)
1961 il_cmd_queue_free(il);
1962 else
1963 il_tx_queue_free(il, txq_id);
1964 }
1965 il4965_free_dma_ptr(il, &il->kw);
1966
1967 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1968
1969
1970 il_free_txq_mem(il);
1971}
1972
1973
1974
1975
1976
1977
1978
1979
/*
 * il4965_txq_ctx_alloc - allocate and initialize the Tx queue context.
 *
 * Allocates the scheduler byte-count tables and keep-warm buffer, then
 * the Tx queue array, programs the keep-warm address and initializes
 * every queue (including the command queue).
 *
 * Return: 0 on success or a negative errno.  On failure all partially
 * allocated resources are released; the duplicate il4965_free_dma_ptr()
 * calls reachable via the 'error' label are harmless because the helper
 * clears the descriptor after freeing.
 */
int
il4965_txq_ctx_alloc(struct il_priv *il)
{
	int ret, txq_id;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	il4965_hw_txq_ctx_free(il);

	ret =
	    il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
				 il->hw_params.scd_bc_tbls_size);
	if (ret) {
		IL_ERR("Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
	if (ret) {
		IL_ERR("Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = il_alloc_txq_mem(il);
	if (ret)
		goto error;

	spin_lock_irqsave(&il->lock, flags);

	/* Turn off all Tx DMA fifos */
	il4965_txq_set_sched(il, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Alloc and init all Tx queues, including the command queue */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		ret = il_tx_queue_init(il, txq_id);
		if (ret) {
			IL_ERR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	il4965_hw_txq_ctx_free(il);
	il4965_free_dma_ptr(il, &il->kw);
error_kw:
	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
error_bc_tbls:
	return ret;
}
2037
2038void
2039il4965_txq_ctx_reset(struct il_priv *il)
2040{
2041 int txq_id;
2042 unsigned long flags;
2043
2044 spin_lock_irqsave(&il->lock, flags);
2045
2046
2047 il4965_txq_set_sched(il, 0);
2048
2049 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2050
2051 spin_unlock_irqrestore(&il->lock, flags);
2052
2053
2054 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2055 il_tx_queue_reset(il, txq_id);
2056}
2057
2058static void
2059il4965_txq_ctx_unmap(struct il_priv *il)
2060{
2061 int txq_id;
2062
2063 if (!il->txq)
2064 return;
2065
2066
2067 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2068 if (txq_id == il->cmd_queue)
2069 il_cmd_queue_unmap(il);
2070 else
2071 il_tx_queue_unmap(il, txq_id);
2072}
2073
2074
2075
2076
/*
 * il4965_txq_ctx_stop - stop the Tx scheduler and drain all Tx DMA
 * channels: clear each channel's config register and poll (up to 1 ms)
 * for its idle bit, logging a timeout on failure.
 */
void
il4965_txq_ctx_stop(struct il_priv *il)
{
	int ch, ret;

	/* Turn off the scheduler (all Tx fifos) */
	_il_wr_prph(il, IL49_SCD_TXFACT, 0);

	/* Stop each Tx DMA channel and wait for it to go idle */
	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
		_il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret =
		    _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				 1000);
		if (ret < 0)
			IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
			       ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
	}
}
2097
2098
2099
2100
2101
2102
2103
2104static int
2105il4965_txq_ctx_activate_free(struct il_priv *il)
2106{
2107 int txq_id;
2108
2109 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2110 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2111 return txq_id;
2112 return -1;
2113}
2114
2115
2116
2117
/*
 * Deactivate a Tx queue in the scheduler: write ACTIVE=0 with the
 * SCD_ACT_EN write-enable bit set, leaving all other configuration
 * untouched.
 */
static void
il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
2127
2128
2129
2130
2131static int
2132il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
2133{
2134 u32 tbl_dw_addr;
2135 u32 tbl_dw;
2136 u16 scd_q2ratid;
2137
2138 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2139
2140 tbl_dw_addr =
2141 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2142
2143 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2144
2145 if (txq_id & 0x1)
2146 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2147 else
2148 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2149
2150 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2151
2152 return 0;
2153}
2154
2155
2156
2157
2158
2159
2160
/*
 * il4965_txq_agg_enable - configure @txq_id as an aggregation queue.
 *
 * Maps the queue to the RA/TID pair, chains it into the scheduler,
 * aligns the read/write pointers to @ssn_idx and programs the per-queue
 * window size and frame limit in scheduler context memory.
 *
 * Return: 0 on success, -EINVAL for a queue id outside the AMPDU range,
 * or an error from il4965_sta_tx_modify_enable_tid().
 */
static int
il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
		      int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->lock, flags);

	/* Stop this Tx queue before configuring it */
	il4965_tx_queue_stop_scheduler(il, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at the idx corresponding to the start sequence
	 * number (low byte of ssn_idx) */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
			  (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
			  & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			  (SCD_FRAME_LIMIT <<
			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Map to Tx DMA/FIFO and activate the queue */
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
2226
/*
 * il4965_tx_agg_start - begin an A-MPDU Tx session for <@sta, @tid>.
 *
 * Claims a free aggregation queue, records the starting sequence number
 * in *@ssn and enables the queue in hardware.  If frames for this TID
 * are still queued, the session stays in IL_EMPTYING_HW_QUEUE_ADDBA
 * until the queue drains (completed by il4965_txq_check_empty());
 * otherwise mac80211 is told immediately that the ADDBA may proceed.
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	/* Negative value doubles as the invalid-TID error code */
	tx_fifo = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	*ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2289
2290
2291
2292
2293
/*
 * il4965_txq_agg_disable - tear down aggregation on @txq_id.
 *
 * Mirrors il4965_txq_agg_enable(): stops the scheduler for the queue,
 * unchains it, realigns the pointers to @ssn_idx and deactivates it.
 *
 * Return: 0 on success, -EINVAL for a queue id outside the AMPDU range.
 */
static int
il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
{
	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	il4965_tx_queue_stop_scheduler(il, txq_id);

	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Realign queue pointers to the supplied start sequence number */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);

	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	il_txq_ctx_deactivate(il, txq_id);
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);

	return 0;
}
2322
/*
 * il4965_tx_agg_stop - stop an A-MPDU Tx session for <@sta, @tid>.
 *
 * If the hardware queue still holds frames, only mark the session
 * IL_EMPTYING_HW_QUEUE_DELBA; the final teardown is completed by
 * il4965_txq_check_empty() once the queue drains.  Otherwise disable
 * the aggregation queue immediately and tell mac80211 the stop is done.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	/* Negative value doubles as the invalid-TID error code */
	tx_fifo_id = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	/* seq_number is pre-incremented, so >>4 yields the next frame's SSN */
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The peer stopped aggregation again before the queue we
		 * selected during ADDBA had a chance to drain, i.e. the
		 * session never completed its setup.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		/* Unexpected state: warn, then proceed with teardown anyway */
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* Queue not empty: defer teardown until it drains */
	if (write_ptr != read_ptr) {
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* Hand over from sta_lock to il->lock with interrupts still
	 * disabled; @flags stays valid for the final irqrestore below. */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to keep mac80211
	 * happy here.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
2400
/*
 * il4965_txq_check_empty - advance a pending ADDBA/DELBA state machine.
 *
 * Called with il->sta_lock held after frames are reclaimed: when an
 * emptying-for-DELBA queue drains, finish disabling aggregation; when an
 * emptying-for-ADDBA TID has no frames left, switch aggregation on and
 * notify mac80211.  Always returns 0.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
	struct il_queue *q = &il->txq[txq_id].q;
	u8 *addr = il->stations[sta_id].sta.sta.addr;
	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];

	lockdep_assert_held(&il->sta_lock);

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_DELBA:
		/* Reclaiming the last packet of the aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = il4965_get_fifo_from_tid(tid);
			D_HT("HW queue empty: continue DELBA flow\n");
			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/* Reclaiming the last pre-aggregation packet of this TID */
		if (tid_data->tfds_in_queue == 0) {
			D_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	}

	return 0;
}
2436
2437static void
2438il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
2439{
2440 struct ieee80211_sta *sta;
2441 struct il_station_priv *sta_priv;
2442
2443 rcu_read_lock();
2444 sta = ieee80211_find_sta(il->vif, addr1);
2445 if (sta) {
2446 sta_priv = (void *)sta->drv_priv;
2447
2448 if (sta_priv->client &&
2449 atomic_dec_return(&sta_priv->pending_frames) == 0)
2450 ieee80211_sta_block_awake(il->hw, sta, false);
2451 }
2452 rcu_read_unlock();
2453}
2454
2455static void
2456il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
2457{
2458 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2459
2460 if (!is_agg)
2461 il4965_non_agg_tx_status(il, hdr->addr1);
2462
2463 ieee80211_tx_status_irqsafe(il->hw, skb);
2464}
2465
/*
 * il4965_tx_queue_reclaim - free processed TFDs up to (excluding) @idx.
 *
 * Walks the queue's read pointer forward, handing each skb back to
 * mac80211 and releasing its TFD.
 *
 * Return: the number of reclaimed QoS data frames (used to decrement
 * tfds_in_queue), or 0 when @idx lies outside the currently used region.
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		skb = txq->skbs[txq->q.read_ptr];

		if (WARN_ON_ONCE(skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		/* Queues at/above the first AMPDU queue carry aggregates */
		il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);

		txq->skbs[txq->q.read_ptr] = NULL;
		il->ops->txq_free_tfd(il, txq);
	}
	return nfreed;
}
2501
2502
2503
2504
2505
2506
2507
/*
 * il4965_tx_status_reply_compressed_ba - process a compressed block-ack.
 *
 * Aligns the BA bitmap with the driver's Tx window, intersects it with
 * the bitmap of frames actually sent (agg->bitmap), counts acknowledged
 * frames and fills the mac80211 tx_info of the aggregate's first frame
 * with the AMPDU result.
 *
 * Return: 0 on success, -EINVAL when no BA was expected, -1 when the
 * aggregate does not fit the 64-bit bitmap window.
 */
static int
il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
				     struct il_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IL_ERR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* The expected block-ack response has arrived */
	agg->wait_for_ba = 0;
	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Shift needed to align the BA bitmap with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
	if (sh < 0)
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		D_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	/* Align the firmware's BA bitmap to the window start */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* Only bits for frames we actually transmitted can count as
	 * successes */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in the aggregate, tally its ack bit */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
			   i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	/* Report the AMPDU outcome on the aggregate's first frame */
	info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);

	return 0;
}
2570
2571static inline bool
2572il4965_is_tx_success(u32 status)
2573{
2574 status &= TX_STATUS_MSK;
2575 return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
2576}
2577
/*
 * il4965_find_station - look up the station-table index for @addr.
 *
 * Broadcast addresses map directly to the broadcast station id; in IBSS
 * mode the search starts at IL_STA_ID.
 *
 * Return: the station index, or IL_INVALID_STATION when the address is
 * unknown or the entry is not yet usable (not uCode-active, or an add
 * operation is still in progress).
 */
static u8
il4965_find_station(struct il_priv *il, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IL_INVALID_STATION;
	unsigned long flags;

	if (il->iw_mode == NL80211_IFTYPE_ADHOC)
		start = IL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return il->hw_params.bcast_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = start; i < il->hw_params.max_stations; i++)
		if (il->stations[i].used &&
		    ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
			ret = i;
			goto out;
		}

	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);

out:
	/*
	 * It may be possible that more commands interacting with stations
	 * arrive before we completed processing the adding of station.
	 * Reject the lookup until the entry is fully active.
	 */
	if (ret != IL_INVALID_STATION &&
	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
	     ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
	      (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
		IL_ERR("Requested station info for sta %d before ready.\n",
		       ret);
		ret = IL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2619
2620static int
2621il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2622{
2623 if (il->iw_mode == NL80211_IFTYPE_STATION)
2624 return IL_AP_ID;
2625 else {
2626 u8 *da = ieee80211_get_DA(hdr);
2627
2628 return il4965_find_station(il, da);
2629 }
2630}
2631
/*
 * Fetch the scheduler SSN from a Tx response.  The uCode writes one
 * __le32 status word per attempted frame; the word immediately after
 * the last one (at offset frame_count) carries the scheduler sequence
 * number, masked down to the 802.11 SN range.
 */
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
	return le32_to_cpup(&tx_resp->u.status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}
2638
2639static inline u32
2640il4965_tx_status_to_mac80211(u32 status)
2641{
2642 status &= TX_STATUS_MSK;
2643
2644 switch (status) {
2645 case TX_STATUS_SUCCESS:
2646 case TX_STATUS_DIRECT_DONE:
2647 return IEEE80211_TX_STAT_ACK;
2648 case TX_STATUS_FAIL_DEST_PS:
2649 return IEEE80211_TX_STAT_TX_FILTERED;
2650 default:
2651 return 0;
2652 }
2653}
2654
2655
2656
2657
/**
 * il4965_tx_status_reply_tx - process an aggregation Tx response
 *
 * Records per-aggregation state (frame count, start index, rate, bitmap
 * of frames still awaiting a block-ack) from the uCode's Tx response.
 * Returns 0 on success, -1 when the response is inconsistent with the
 * queued frames.
 */
static int
il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
			  struct il4965_tx_resp *tx_resp, int txq_id,
			  u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;
	if (agg->wait_for_ba)
		D_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* Single-frame "aggregation": no block-ack expected, report now */
	if (agg->frame_count == 1) {

		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
			   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il, rate_n_flags, info);

		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
			   tx_resp->failure_frame);
		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames: build a bitmap of frames that still
		 * need to be acknowledged by the coming block-ack. */
		u64 bitmap = 0;
		int start = agg->start_idx;
		struct sk_buff *skb;

		/* Construct bitmap of pending frames within the Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_IDX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* Frames the uCode never put on air are skipped */
			if (status &
			    (AGG_TX_STATE_FEW_BYTES_MSK |
			     AGG_TX_STATE_ABORT_MSK))
				continue;

			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
				   agg->frame_count, txq_id, idx);

			skb = il->txq[txq_id].skbs[idx];
			if (WARN_ON_ONCE(skb == NULL))
				return -1;
			hdr = (struct ieee80211_hdr *) skb->data;

			/* Sanity: queue index must match the frame's SN */
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
				IL_ERR("BUG_ON idx doesn't match seq control"
				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
				       IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
				   IEEE80211_SEQ_TO_SN(sc));

			/* Shift/normalize the 64-bit window so every seen
			 * index fits; indices wrap modulo 256 (0xff math). */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
				   (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
			   agg->frame_count, agg->start_idx,
			   (unsigned long long)agg->bitmap);

		/* Only wait for a block-ack if something is outstanding */
		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
2766
2767
2768
2769
/**
 * il4965_hdl_tx - handler for the N_TX (Tx response) notification
 *
 * Reclaims completed frames from the Tx queue, fills in mac80211 status
 * for the transmitted skb, and wakes the queue when space frees up.
 */
static void
il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info;
	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	/* NOTE(review): tid is only assigned for QoS-data frames (qc != NULL);
	 * all later uses are guarded by qc checks or sched_retry+WARN_ON(!qc) —
	 * confirm no unguarded read remains. */
	int uninitialized_var(tid);
	int sta_id;
	int freed;
	u8 *qc = NULL;
	unsigned long flags;

	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;

	skb = txq->skbs[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(skb);
	memset(&info->status, 0, sizeof(info->status));

	hdr = (struct ieee80211_hdr *) skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	sta_id = il4965_get_ra_sta_id(il, hdr);
	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
		IL_ERR("Station not known\n");
		return;
	}

	/*
	 * Firmware will not transmit frame on passive channel, if it not yet
	 * received some valid frame on that channel.  When this error happens
	 * we have to wait until firmware will unblock itself i.e. when we
	 * note received beacon or other frame.  We unblock queues in
	 * il4965_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
	 */
	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
	    il->iw_mode == NL80211_IFTYPE_STATION) {
		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
		D_INFO("Stopped queues - RX waiting on passive channel\n");
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (txq->sched_retry) {
		/* Aggregation path: reclaim up to the scheduler SSN */
		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
		struct il_ht_agg *agg = NULL;
		WARN_ON(!qc);

		agg = &il->stations[sta_id].tid[tid].agg;

		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);

		/* check if BAR is needed */
		if (tx_resp->frame_count == 1 &&
		    !il4965_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
				   "%d idx %d\n", scd_ssn, idx);
			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
			if (qc)
				il4965_free_tfds_in_queue(il, sta_id, tid,
							  freed);

			if (il->mac80211_registered &&
			    il_queue_space(&txq->q) > txq->q.low_mark &&
			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
				il_wake_queue(il, txq);
		}
	} else {
		/* Non-aggregation path: report status and reclaim directly */
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il,
					    le32_to_cpu(tx_resp->rate_n_flags),
					    info);

		D_TX_REPLY("TXQ %d status %s (0x%08x) "
			   "rate_n_flags 0x%x retries %d\n", txq_id,
			   il4965_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);

		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
		if (qc && likely(sta_id != IL_INVALID_STATION))
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
		else if (sta_id == IL_INVALID_STATION)
			D_TX_REPLY("Station not known\n");

		if (il->mac80211_registered &&
		    il_queue_space(&txq->q) > txq->q.low_mark)
			il_wake_queue(il, txq);
	}
	if (qc && likely(sta_id != IL_INVALID_STATION))
		il4965_txq_check_empty(il, sta_id, tid, txq_id);

	il4965_check_abort_status(il, tx_resp->frame_count, status);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2886
2887
2888
2889
2890void
2891il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2892 struct ieee80211_tx_info *info)
2893{
2894 struct ieee80211_tx_rate *r = &info->status.rates[0];
2895
2896 info->status.antenna =
2897 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2898 if (rate_n_flags & RATE_MCS_HT_MSK)
2899 r->flags |= IEEE80211_TX_RC_MCS;
2900 if (rate_n_flags & RATE_MCS_GF_MSK)
2901 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2902 if (rate_n_flags & RATE_MCS_HT40_MSK)
2903 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2904 if (rate_n_flags & RATE_MCS_DUP_MSK)
2905 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2906 if (rate_n_flags & RATE_MCS_SGI_MSK)
2907 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2908 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2909}
2910
2911
2912
2913
2914
2915
2916
/**
 * il4965_hdl_compressed_ba - handler for N_COMPRESSED_BA notification
 *
 * Processes a compressed block-ack from the uCode: updates the
 * aggregation state, reclaims acknowledged frames up to the scheduler
 * SSN, and wakes the Tx queue when it drains below the low-water mark.
 */
static void
il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct il_tx_queue *txq = NULL;
	struct il_ht_agg *agg;
	int idx;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx win, corresponds to idx
	 * (in Tx queue's circular buffer) of first TFD/frame in win */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= il->hw_params.max_txq_num) {
		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &il->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &il->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * A queue mismatch can legitimately happen when the BA for
		 * an already torn-down session arrives late; log and drop
		 * rather than corrupting another flow's state.
		 */
		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
			   scd_flow, agg->txq_id);
		return;
	}

	/* Find idx just before block-ack win */
	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&il->sta_lock, flags);

	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
		   ba_resp->sta_id);
	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
		   ba_resp->scd_flow, ba_resp->scd_ssn);
	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
		   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in win */
	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack win (the block-ack win is a block of TFDs whose status
	 * has not yet been reported to mac80211) */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {

		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
		il4965_free_tfds_in_queue(il, sta_id, tid, freed);

		if (il_queue_space(&txq->q) > txq->q.low_mark &&
		    il->mac80211_registered &&
		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
			il_wake_queue(il, txq);

		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2993
#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * Map a uCode Tx status code to a human-readable failure name for debug
 * logging.  The helper macros stringify the enum suffix so each case is
 * one line; unknown codes fall through to "UNKNOWN".
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif
3033
/**
 * il4965_sta_alloc_lq - allocate and pre-fill a link-quality command
 *
 * Builds a default rate-scaling table for a new station: lowest legal
 * rate for the current band in every retry slot, plus antenna masks
 * derived from the hardware's valid Tx antennas.  Caller owns (and must
 * kfree) the returned command; returns NULL on allocation failure.
 */
static struct il_link_quality_cmd *
il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
{
	int i, r;
	struct il_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IL_ERR("Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}

	/* Set up the rate scaling to start at the selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (il->band == IEEE80211_BAND_5GHZ)
		r = RATE_6M_IDX;
	else
		r = RATE_1M_IDX;

	/* CCK modulation flag only applies to the CCK rate range */
	if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |=
	    il4965_first_antenna(il->hw_params.
				 valid_tx_ant) << RATE_MCS_ANT_POS;
	rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
	/* Same initial rate in every retry slot */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
	    il4965_first_antenna(il->hw_params.valid_tx_ant);

	link_cmd->general_params.dual_stream_ant_msk =
	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
							       valid_tx_ant);
	/* Fall back to A+B if the mask came out empty; with exactly two
	 * valid antennas, use them both for dual-stream */
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
		    il->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
3085
3086
3087
3088
3089
3090
/**
 * il4965_add_bssid_station - add a station entry for a BSSID and init LQ
 * @il: driver private data
 * @addr: MAC address of the station to add
 * @sta_id_r: optional out-param; receives the station id (or
 *            IL_INVALID_STATION on failure paths before assignment)
 *
 * Adds the station via the common path, marks it driver-local, then
 * allocates and sends an initial link-quality (rate scaling) command.
 * Returns 0 on success or a negative errno.
 */
int
il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;

	if (sta_id_r)
		*sta_id_r = IL_INVALID_STATION;

	ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].used |= IL_STA_LOCAL;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
		       addr);
		return -ENOMEM;
	}

	ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
	if (ret)
		IL_ERR("Link quality command failed (%d)\n", ret);

	/* Station keeps ownership of link_cmd from here on */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3133
/**
 * il4965_static_wepkey_cmd - push the static (default) WEP keys to uCode
 * @il: driver private data
 * @send_if_empty: send the command even when no key slot is populated
 *                 (needed so the uCode forgets previously-set keys)
 *
 * Serializes all WEP_KEYS_MAX host-side keys into a single C_WEPKEY
 * command.  Returns 0 when nothing needed sending, otherwise the
 * il_send_cmd() result.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
{
	int i;
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = C_WEPKEY,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};
	bool not_empty = false;

	might_sleep();

	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		u8 key_size = il->_4965.wep_keys[i].key_size;

		wep_cmd->key[i].key_idx = i;
		if (key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = true;
		} else
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;

		wep_cmd->key[i].key_size = key_size;
		/* key material starts at byte 3 of the key array — same
		 * layout as the dynamic-WEP path (see
		 * il4965_set_wep_dynamic_key_info); presumably a hardware
		 * key-format requirement — confirm against uCode docs */
		memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
3179
/* Re-send the host-side static WEP keys to the uCode (e.g. after a
 * firmware restart); skips the command when every slot is empty. */
int
il4965_restore_default_wep_keys(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	return il4965_static_wepkey_cmd(il, false);
}
3187
3188int
3189il4965_remove_default_wep_key(struct il_priv *il,
3190 struct ieee80211_key_conf *keyconf)
3191{
3192 int ret;
3193 int idx = keyconf->keyidx;
3194
3195 lockdep_assert_held(&il->mutex);
3196
3197 D_WEP("Removing default WEP key: idx=%d\n", idx);
3198
3199 memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
3200 if (il_is_rfkill(il)) {
3201 D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
3202
3203 return 0;
3204 }
3205 ret = il4965_static_wepkey_cmd(il, 1);
3206 D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
3207
3208 return ret;
3209}
3210
/**
 * il4965_set_default_wep_key - install a default (static) WEP key
 *
 * Validates the key length (40- or 104-bit), stores the key host-side,
 * and sends the full static key table to the uCode.  The uCode handles
 * IV generation for default keys, so the GENERATE_IV flag is cleared.
 * Returns 0 on success, -EINVAL for a bad length, or the command error.
 */
int
il4965_set_default_wep_key(struct il_priv *il,
			   struct ieee80211_key_conf *keyconf)
{
	int ret;
	int len = keyconf->keylen;
	int idx = keyconf->keyidx;

	lockdep_assert_held(&il->mutex);

	if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;

	il->_4965.wep_keys[idx].key_size = len;
	memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);

	ret = il4965_static_wepkey_cmd(il, false);

	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
	return ret;
}
3238
/**
 * il4965_set_wep_dynamic_key_info - install a pairwise (dynamic) WEP key
 *
 * Fills in the station's key info and sends a C_ADD_STA modify command
 * so the uCode installs the key.  Returns the il_send_add_sta() result.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* uCode generates the IV for hardware-handled WEP */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* WEP key material goes at offset 3 of the command's key array —
	 * same layout as il4965_static_wepkey_cmd */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	/* Only grab a new uCode key slot if none is assigned yet */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);

	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Copy out under the lock; send after dropping it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3292
/**
 * il4965_set_ccmp_dynamic_key_info - install a pairwise CCMP (AES) key
 *
 * Stores the key in the station entry and sends a C_ADD_STA modify
 * command.  mac80211 generates the IV (GENERATE_IV set).  Returns the
 * il_send_add_sta() result.
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* Only grab a new uCode key slot if none is assigned yet */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);

	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Copy out under the lock; send after dropping it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3340
3341static int
3342il4965_set_tkip_dynamic_key_info(struct il_priv *il,
3343 struct ieee80211_key_conf *keyconf, u8 sta_id)
3344{
3345 unsigned long flags;
3346 int ret = 0;
3347 __le16 key_flags = 0;
3348
3349 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
3350 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
3351 key_flags &= ~STA_KEY_FLG_INVALID;
3352
3353 if (sta_id == il->hw_params.bcast_id)
3354 key_flags |= STA_KEY_MULTICAST_MSK;
3355
3356 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3357 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3358
3359 spin_lock_irqsave(&il->sta_lock, flags);
3360
3361 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
3362 il->stations[sta_id].keyinfo.keylen = 16;
3363
3364 if ((il->stations[sta_id].sta.key.
3365 key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
3366 il->stations[sta_id].sta.key.key_offset =
3367 il_get_free_ucode_key_idx(il);
3368
3369
3370
3371 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3372 "no space for a new key");
3373
3374 il->stations[sta_id].sta.key.key_flags = key_flags;
3375
3376
3377 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
3378
3379 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
3380
3381 spin_unlock_irqrestore(&il->sta_lock, flags);
3382
3383 return ret;
3384}
3385
/**
 * il4965_update_tkip_key - push updated TKIP phase-1 key to the uCode
 *
 * Called when mac80211 derives new phase-1 (TTAK) key material for a
 * TKIP association.  Sends an asynchronous C_ADD_STA modify with the
 * new iv32 and per-word TTAK values.  Bails out quietly if a scan had
 * to be cancelled first or the station is unknown.
 */
void
il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* TTAK is five 16-bit words */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* ASYNC: cannot sleep here (spinlock held) */
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
3419
/**
 * il4965_remove_dynamic_key - remove a pairwise key from a station
 *
 * Clears host-side key state, frees the uCode key-table slot, and sends
 * a C_ADD_STA modify marking the key invalid.  A mismatched key index
 * or an already-invalid key is treated as success (nothing to remove).
 * Returns 0 or the il_send_add_sta() result.
 */
int
il4965_remove_dynamic_key(struct il_priv *il,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* The installed key index no longer matches the one being
		 * removed — it was already replaced, so there is nothing
		 * left to do for this key. */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Release the uCode key-table slot */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* Copy out under the lock; send after dropping it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3480
3481int
3482il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
3483 u8 sta_id)
3484{
3485 int ret;
3486
3487 lockdep_assert_held(&il->mutex);
3488
3489 il->_4965.key_mapping_keys++;
3490 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3491
3492 switch (keyconf->cipher) {
3493 case WLAN_CIPHER_SUITE_CCMP:
3494 ret =
3495 il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
3496 break;
3497 case WLAN_CIPHER_SUITE_TKIP:
3498 ret =
3499 il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
3500 break;
3501 case WLAN_CIPHER_SUITE_WEP40:
3502 case WLAN_CIPHER_SUITE_WEP104:
3503 ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
3504 break;
3505 default:
3506 IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
3507 keyconf->cipher);
3508 ret = -EINVAL;
3509 }
3510
3511 D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3512 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
3513
3514 return ret;
3515}
3516
3517
3518
3519
3520
3521
3522
3523
/**
 * il4965_alloc_bcast_station - set up the broadcast station entry
 *
 * Prepares the special broadcast station in the driver's table and
 * attaches a default link-quality command to it.  Returns 0 on success,
 * -EINVAL if the slot could not be prepared, -ENOMEM on LQ allocation
 * failure (the station stays allocated in that case).
 */
int
il4965_alloc_bcast_station(struct il_priv *il)
{
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);

		return -EINVAL;
	}

	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
	il->stations[sta_id].used |= IL_STA_BCAST;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* LQ allocation may sleep — do it outside the spinlock */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR
		    ("Unable to initialize rate scaling for bcast station.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3557
3558
3559
3560
3561
3562
3563
3564static int
3565il4965_update_bcast_station(struct il_priv *il)
3566{
3567 unsigned long flags;
3568 struct il_link_quality_cmd *link_cmd;
3569 u8 sta_id = il->hw_params.bcast_id;
3570
3571 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3572 if (!link_cmd) {
3573 IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
3574 return -ENOMEM;
3575 }
3576
3577 spin_lock_irqsave(&il->sta_lock, flags);
3578 if (il->stations[sta_id].lq)
3579 kfree(il->stations[sta_id].lq);
3580 else
3581 D_INFO("Bcast sta rate scaling has not been initialized.\n");
3582 il->stations[sta_id].lq = link_cmd;
3583 spin_unlock_irqrestore(&il->sta_lock, flags);
3584
3585 return 0;
3586}
3587
/* Public wrapper: refresh link-quality state for the broadcast station. */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il);
}
3593
3594
3595
3596
/**
 * il4965_sta_tx_modify_enable_tid - re-enable Tx for one TID of a station
 *
 * Clears the TID's bit in the station's tid_disable_tx mask and sends
 * the modified station entry to the uCode synchronously.
 */
int
il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
{
	unsigned long flags;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Remove "disable" flag, to enable Tx for this TID */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3616
/**
 * il4965_sta_rx_agg_start - tell the uCode an Rx aggregation session opened
 * @ssn: starting sequence number of the block-ack window
 *
 * Sends a C_ADD_STA modify with the ADDBA TID and SSN.  Returns -ENXIO
 * for an unknown station, otherwise the il_send_add_sta() result.
 */
int
il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
			u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3643
/**
 * il4965_sta_rx_agg_stop - tell the uCode an Rx aggregation session closed
 *
 * Sends a C_ADD_STA modify with the DELBA TID.  Returns -ENXIO for an
 * unknown station, otherwise the il_send_add_sta() result.
 */
int
il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3670
/**
 * il4965_sta_modify_sleep_tx_count - allow Tx to a power-saving station
 * @cnt: number of frames the uCode may send while the station sleeps
 *
 * Marks the station as power-saving and sets its sleep Tx budget; the
 * C_ADD_STA is sent asynchronously since a spinlock is held.
 */
void
il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.sta.modify_mask =
	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&il->sta_lock, flags);

}
3687
3688void
3689il4965_update_chain_flags(struct il_priv *il)
3690{
3691 if (il->ops->set_rxon_chain) {
3692 il->ops->set_rxon_chain(il);
3693 if (il->active.rx_chain != il->staging.rx_chain)
3694 il_commit_rxon(il);
3695 }
3696}
3697
3698static void
3699il4965_clear_free_frames(struct il_priv *il)
3700{
3701 struct list_head *element;
3702
3703 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
3704
3705 while (!list_empty(&il->free_frames)) {
3706 element = il->free_frames.next;
3707 list_del(element);
3708 kfree(list_entry(element, struct il_frame, list));
3709 il->frames_count--;
3710 }
3711
3712 if (il->frames_count) {
3713 IL_WARN("%d frames still in use. Did we lose one?\n",
3714 il->frames_count);
3715 il->frames_count = 0;
3716 }
3717}
3718
3719static struct il_frame *
3720il4965_get_free_frame(struct il_priv *il)
3721{
3722 struct il_frame *frame;
3723 struct list_head *element;
3724 if (list_empty(&il->free_frames)) {
3725 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3726 if (!frame) {
3727 IL_ERR("Could not allocate frame!\n");
3728 return NULL;
3729 }
3730
3731 il->frames_count++;
3732 return frame;
3733 }
3734
3735 element = il->free_frames.next;
3736 list_del(element);
3737 return list_entry(element, struct il_frame, list);
3738}
3739
static void
il4965_free_frame(struct il_priv *il, struct il_frame *frame)
{
	/* Scrub the frame's contents and park it back on the free list
	 * for reuse by il4965_get_free_frame(). */
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
3746
3747static u32
3748il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
3749 int left)
3750{
3751 lockdep_assert_held(&il->mutex);
3752
3753 if (!il->beacon_skb)
3754 return 0;
3755
3756 if (il->beacon_skb->len > left)
3757 return 0;
3758
3759 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3760
3761 return il->beacon_skb->len;
3762}
3763
3764
3765static void
3766il4965_set_beacon_tim(struct il_priv *il,
3767 struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
3768 u32 frame_size)
3769{
3770 u16 tim_idx;
3771 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
3772
3773
3774
3775
3776
3777 tim_idx = mgmt->u.beacon.variable - beacon;
3778
3779
3780 while ((tim_idx < (frame_size - 2)) &&
3781 (beacon[tim_idx] != WLAN_EID_TIM))
3782 tim_idx += beacon[tim_idx + 1] + 2;
3783
3784
3785 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
3786 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
3787 tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
3788 } else
3789 IL_WARN("Unable to find TIM Element in beacon\n");
3790}
3791
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	/*
	 * Build a C_TX_BEACON host command in frame->u.beacon: copy in the
	 * cached beacon template, point the uCode at the TIM element, and
	 * select a transmit rate and antenna.  Returns the total command
	 * size in bytes, or 0 on error.
	 */
	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("Trying to build beacon without beaconing enabled\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents (returns 0 if no template or no fit) */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields (TIM offset/size) */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Set up packet rate and flags: rotate the management-frame TX
	 * antenna, and flag CCK modulation for CCK rates. */
	rate = il_get_lowest_plcp(il);
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
3846
3847int
3848il4965_send_beacon_cmd(struct il_priv *il)
3849{
3850 struct il_frame *frame;
3851 unsigned int frame_size;
3852 int rc;
3853
3854 frame = il4965_get_free_frame(il);
3855 if (!frame) {
3856 IL_ERR("Could not obtain free frame buffer for beacon "
3857 "command.\n");
3858 return -ENOMEM;
3859 }
3860
3861 frame_size = il4965_hw_get_beacon_cmd(il, frame);
3862 if (!frame_size) {
3863 IL_ERR("Error configuring the beacon command\n");
3864 il4965_free_frame(il, frame);
3865 return -EINVAL;
3866 }
3867
3868 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
3869
3870 il4965_free_frame(il, frame);
3871
3872 return rc;
3873}
3874
static inline dma_addr_t
il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];

	/* The low 32 address bits live in 'lo' (unaligned LE32). */
	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	/*
	 * On configs where dma_addr_t is wider than 32 bits, address bits
	 * 32-35 are stored in the low nibble of hi_n_len.  The shift is
	 * deliberately split into two <<16 steps so the expression stays
	 * valid (no shift >= type width) on 32-bit dma_addr_t builds.
	 */
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
		    16;

	return addr;
}
3888
3889static inline u16
3890il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
3891{
3892 struct il_tfd_tb *tb = &tfd->tbs[idx];
3893
3894 return le16_to_cpu(tb->hi_n_len) >> 4;
3895}
3896
static inline void
il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;	/* length goes in the upper 12 bits */

	/* Low 32 address bits go into 'lo' (field may be unaligned). */
	put_unaligned_le32(addr, &tb->lo);
	/*
	 * Address bits 32-35 go into the low nibble of hi_n_len.  The
	 * (>>16)>>16 split avoids an invalid >=32-bit shift when
	 * dma_addr_t is only 32 bits wide.
	 */
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	/* Buffers are appended in order, so idx is now the last one. */
	tfd->num_tbs = idx + 1;
}
3911
static inline u8
il4965_tfd_get_num_tbs(struct il_tfd *tfd)
{
	/* Only the low 5 bits of num_tbs hold the valid buffer count. */
	return tfd->num_tbs & 0x1f;
}
3917
3918
3919
3920
3921
3922
3923
3924
3925
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	/*
	 * Tear down the TFD at the queue's read pointer: unmap the command
	 * buffer recorded in txq->meta (slot 0), unmap any additional
	 * payload chunks, and free the associated skb if there is one.
	 */
	tfd = &tfd_tmp[idx];

	/* Sanity check on number of chunks */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* corrupt TFD — bail out rather than unmap garbage */
		return;
	}

	/* Unmap the first buffer using the mapping saved in the meta
	 * entry (bidirectional: it holds the tx command). */
	if (num_tbs)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap the remaining chunks, if any, using the addresses and
	 * lengths stored in the TFD itself. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* dev_kfree_skb_any(): callable from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
3970
int
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	struct il_queue *q;
	struct il_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	/*
	 * Append one DMA buffer (addr/len) to the TFD at the queue's write
	 * pointer.  When 'reset' is set, the TFD is cleared first so the
	 * buffer becomes its first entry.  'pad' is unused here.
	 * Returns 0 on success, -EINVAL if the TFD is already full.
	 */
	q = &txq->q;
	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can carry at most IL_NUM_OF_TBS buffers. */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",
		       IL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* The TFD format only holds 36-bit DMA addresses. */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
4003
4004
4005
4006
4007
4008
4009
4010
int
il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Tell the hardware where this queue's circular TFD buffer lives;
	 * the register takes the DMA address shifted right by 8 bits.
	 * Always returns 0. */
	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);

	return 0;
}
4021
4022
4023
4024
4025
4026
4027static void
4028il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
4029{
4030 struct il_rx_pkt *pkt = rxb_addr(rxb);
4031 struct il_alive_resp *palive;
4032 struct delayed_work *pwork;
4033
4034 palive = &pkt->u.alive_frame;
4035
4036 D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
4037 palive->is_valid, palive->ver_type, palive->ver_subtype);
4038
4039 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
4040 D_INFO("Initialization Alive received.\n");
4041 memcpy(&il->card_alive_init, &pkt->u.alive_frame,
4042 sizeof(struct il_init_alive_resp));
4043 pwork = &il->init_alive_start;
4044 } else {
4045 D_INFO("Runtime Alive received.\n");
4046 memcpy(&il->card_alive, &pkt->u.alive_frame,
4047 sizeof(struct il_alive_resp));
4048 pwork = &il->alive_start;
4049 }
4050
4051
4052
4053 if (palive->is_valid == UCODE_VALID_OK)
4054 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
4055 else
4056 IL_WARN("uCode did not respond OK.\n");
4057}
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069static void
4070il4965_bg_stats_periodic(unsigned long data)
4071{
4072 struct il_priv *il = (struct il_priv *)data;
4073
4074 if (test_bit(S_EXIT_PENDING, &il->status))
4075 return;
4076
4077
4078 if (!il_is_ready_rf(il))
4079 return;
4080
4081 il_send_stats_request(il, CMD_ASYNC, false);
4082}
4083
/* N_BEACON notification handler: record IBSS-manager status. */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif
	/* Remember whether the uCode says we are the IBSS beacon manager. */
	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
4101
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	/* Handle a critical-temperature (CT) kill notification: stop all
	 * TX and arrange for the uCode to leave CT-kill mode. */
	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* Tell the uCode it may exit CT-kill; the read-back appears to
	 * flush the posted write — NOTE(review): confirm. */
	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	_il_rd(il, CSR_UCODE_DRV_GP1);

	/* NOTE(review): this grab/release of NIC access seems to be done
	 * purely for its register side effects — confirm. */
	spin_lock_irqsave(&il->reg_lock, flags);
	if (likely(_il_grab_nic_access(il)))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
4121
4122
4123
/* N_CARD_STATE handler: react to HW/SW/CT rfkill state changes. */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	unsigned long status = il->status;	/* snapshot to detect change */

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block host commands while any disable reason is set ... */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ... but lift the block again unless RXON disabling is
		 * also in effect. */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	/* Mirror the HW rfkill state into our status bits. */
	if (flags & HW_CARD_DISABLED)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* If rfkill changed, report it to mac80211; otherwise wake any
	 * waiter blocked on the command queue. */
	if ((test_bit(S_RFKILL, &status) !=
	     test_bit(S_RFKILL, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RFKILL, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
/* Populate il->handlers[] with the RX notification/response dispatch
 * table used by il4965_rx_handle(). */
static void
il4965_setup_handlers(struct il_priv *il)
{
	il->handlers[N_ALIVE] = il4965_hdl_alive;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il4965_hdl_beacon;

	/* Both the reply to an explicit stats request (C_STATS) and the
	 * unsolicited periodic stats notification (N_STATS) are handled. */
	il->handlers[C_STATS] = il4965_hdl_c_stats;
	il->handlers[N_STATS] = il4965_hdl_stats;

	il_setup_rx_scan_handlers(il);

	/* status change handler */
	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;

	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
	/* Rx handlers */
	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
	il->handlers[N_RX] = il4965_hdl_rx;
	/* block ack */
	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
	/* Tx response */
	il->handlers[C_TX] = il4965_hdl_tx;
}
4213
4214
4215
4216
4217
4218
4219
4220
/*
 * il4965_rx_handle - Main entry for receiving responses/notifications
 *
 * Walk the RX queue from the driver's read index up to the index the
 * uCode last wrote, dispatch each packet through il->handlers[], reclaim
 * command buffers, and recycle or retire the RX pages.
 */
void
il4965_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read idx (stored in shared DRAM) tells us the last RX
	 * buffer the driver may process. */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	/* Calculate how many slots will need restocking afterwards. */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* Replenish eagerly (inside the loop) when more than half empty. */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* A NULL slot here means the refill code is broken —
		 * catch that bug immediately. */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);	/* account for status word */

		/* Reclaim a command buffer only if this packet is a
		 * response to a driver-originated command; packets the
		 * uCode originates on its own (RX frames, BA, stats, TX
		 * status) have no command buffer to reclaim.  The
		 * SEQ_RX_FRAME check alone is not sufficient, hence the
		 * explicit cmd exclusions. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
		    (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
		    (pkt->hdr.cmd != N_RX_MPDU) &&
		    (pkt->hdr.cmd != N_COMPRESSED_BA) &&
		    (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);

		/* Dispatch through the table set up by
		 * il4965_setup_handlers(); unknown commands are only
		 * logged. */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
		} else {
			/* No handling needed */
			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
		}

		/* From here on, always check rxb->page before touching it
		 * (or pkt): a handler may have taken or freed the page. */

		if (reclaim) {
			/* Complete the host command; this may wake a
			 * waiter blocked in il_send_cmd(). */
			if (rxb->page)
				il_tx_cmd_complete(il, rxb);
			else
				IL_WARN("Claim null rxb?\n");
		}

		/* Recycle the page: remap it and put it on rx_free, or —
		 * if the page is gone or remapping failed — queue the rxb
		 * on rx_used to get a fresh page later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma =
			    pci_map_page(il->pci_dev, rxb->page, 0,
					 PAGE_SIZE << il->hw_params.
					 rx_page_order, PCI_DMA_FROMDEVICE);

			if (unlikely(pci_dma_mapping_error(il->pci_dev,
							   rxb->page_dma))) {
				__il_free_pages(il, rxb->page);
				rxb->page = NULL;
				list_add_tail(&rxb->list, &rxq->rx_used);
			} else {
				list_add_tail(&rxb->list, &rxq->rx_free);
				rxq->free_count++;
			}
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If the queue was running low, restock every 8 packets
		 * so the uCode does not starve mid-loop. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				il4965_rx_replenish_now(il);
				count = 0;
			}
		}
	}

	/* Publish the new read index and restock the queue. */
	rxq->read = i;
	if (fill_rx)
		il4965_rx_replenish_now(il);
	else
		il4965_rx_queue_restock(il);
}
4358
4359
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* Wait for any in-flight hard IRQ to finish, then make sure the
	 * IRQ tasklet has run to completion and will not run again. */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
4367
/* Bottom half of the interrupt handler: read and acknowledge the
 * pending interrupt causes, then service each one in turn. */
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.  Writing the value
	 * back to CSR_INT acknowledges exactly the bits we read. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts; any that
	 * arrive after this point show up in the next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug output */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* The FH RX/TX status bits are mirrored into CSR_INT; fold them
	 * in here in case an FH interrupt arrived between the two
	 * reads above. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Hardware error: disable interrupts and restart; nothing else
	 * can usefully be serviced afterwards, so return early. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* Scheduler interrupt: counted for debug only. */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive interrupt: the real work happens via the RX
		 * N_ALIVE notification. */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* These two bits need no servicing; drop them before the
	 * unhandled-bits checks below. */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF-kill switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;

		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* Update our rfkill status bit here; on radio-enable a
		 * forced reset brings the device back up. */
		if (hw_rf_kill) {
			set_bit(S_RFKILL, &il->status);
		} else {
			clear_bit(S_RFKILL, &il->status);
			il_force_reset(il, true);
		}
		wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself (CT kill). */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode woke up after power-down sleep: tell the device about any
	 * RX buffers made available and TX work queued while it slept. */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, RX-frame notifications and other
	 * notifications come through the RX queue. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "TX" DMA channel is used only for loading uCode. */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;

		/* Wake the uCode-load routine: the upload DMA finished. */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable interrupts — but only if they were enabled before the
	 * IRQ disabled them; otherwise re-arm just the rfkill interrupt
	 * if one was serviced. */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);

	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
4552
4553
4554
4555
4556
4557
4558
4559#ifdef CONFIG_IWLEGACY_DEBUG
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572static ssize_t
4573il4965_show_debug_level(struct device *d, struct device_attribute *attr,
4574 char *buf)
4575{
4576 struct il_priv *il = dev_get_drvdata(d);
4577 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
4578}
4579
4580static ssize_t
4581il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4582 const char *buf, size_t count)
4583{
4584 struct il_priv *il = dev_get_drvdata(d);
4585 unsigned long val;
4586 int ret;
4587
4588 ret = kstrtoul(buf, 0, &val);
4589 if (ret)
4590 IL_ERR("%s is not in hex or decimal form.\n", buf);
4591 else
4592 il->debug_level = val;
4593
4594 return strnlen(buf, count);
4595}
4596
/* sysfs attribute "debug_level" (RW): runtime debug mask. */
static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
		   il4965_store_debug_level);
4599
4600#endif
4601
static ssize_t
il4965_show_temperature(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);

	/* The temperature reading is only meaningful once the uCode
	 * is alive. */
	if (!il_is_alive(il))
		return -EAGAIN;

	return sprintf(buf, "%d\n", il->temperature);
}
4613
/* sysfs attribute "temperature" (RO): last reported chip temperature. */
static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
4615
4616static ssize_t
4617il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
4618{
4619 struct il_priv *il = dev_get_drvdata(d);
4620
4621 if (!il_is_ready_rf(il))
4622 return sprintf(buf, "off\n");
4623 else
4624 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
4625}
4626
4627static ssize_t
4628il4965_store_tx_power(struct device *d, struct device_attribute *attr,
4629 const char *buf, size_t count)
4630{
4631 struct il_priv *il = dev_get_drvdata(d);
4632 unsigned long val;
4633 int ret;
4634
4635 ret = kstrtoul(buf, 10, &val);
4636 if (ret)
4637 IL_INFO("%s is not in decimal form.\n", buf);
4638 else {
4639 ret = il_set_tx_power(il, val, false);
4640 if (ret)
4641 IL_ERR("failed setting tx power (0x%d).\n", ret);
4642 else
4643 ret = count;
4644 }
4645 return ret;
4646}
4647
/* sysfs attribute "tx_power" (RW): user tx-power limit. */
static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
		   il4965_store_tx_power);
4650
/* Attributes registered on the PCI device via sysfs_create_group() in
 * il4965_ucode_callback(); debug_level only exists on debug builds. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

static struct attribute_group il_attribute_group = {
	.name = NULL,		/* put attributes in the device's own directory */
	.attrs = il_sysfs_entries,
};
4664
4665
4666
4667
4668
4669
4670
4671static void
4672il4965_dealloc_ucode_pci(struct il_priv *il)
4673{
4674 il_free_fw_desc(il->pci_dev, &il->ucode_code);
4675 il_free_fw_desc(il->pci_dev, &il->ucode_data);
4676 il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
4677 il_free_fw_desc(il->pci_dev, &il->ucode_init);
4678 il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
4679 il_free_fw_desc(il->pci_dev, &il->ucode_boot);
4680}
4681
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets; presumably this lets the on-board processor
	 * start running — NOTE(review): confirm against CSR docs. */
	_il_wr(il, CSR_RESET, 0);
}
4688
4689static void il4965_ucode_callback(const struct firmware *ucode_raw,
4690 void *context);
4691static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
4692
4693static int __must_check
4694il4965_request_firmware(struct il_priv *il, bool first)
4695{
4696 const char *name_pre = il->cfg->fw_name_pre;
4697 char tag[8];
4698
4699 if (first) {
4700 il->fw_idx = il->cfg->ucode_api_max;
4701 sprintf(tag, "%d", il->fw_idx);
4702 } else {
4703 il->fw_idx--;
4704 sprintf(tag, "%d", il->fw_idx);
4705 }
4706
4707 if (il->fw_idx < il->cfg->ucode_api_min) {
4708 IL_ERR("no suitable firmware found!\n");
4709 return -ENOENT;
4710 }
4711
4712 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
4713
4714 D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
4715
4716 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4717 &il->pci_dev->dev, GFP_KERNEL, il,
4718 il4965_ucode_callback);
4719}
4720
/* Pointers into (and sizes of) the five sections of a raw uCode file:
 * runtime inst/data, init inst/data, and bootstrap code. */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
4725
/* Parse a raw uCode image into its five pieces.  Returns 0 on success,
 * -EINVAL when the file is truncated or its sizes are inconsistent. */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	/* Only the 24-byte v1 header layout is handled; unknown API
	 * versions fall through to it as well. */
	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header. */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* The five sections follow the header back to back. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
4779
4780
4781
4782
4783
4784
4785
/*
 * il4965_ucode_callback - completion callback for the firmware request
 *
 * Validates the loaded uCode file, copies its pieces into DMA buffers
 * for the card to fetch, and finishes driver registration (mac80211,
 * debugfs, sysfs).  On a recoverable failure it retries with the next
 * lower firmware API index; otherwise it unbinds the driver.
 */
static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct il_priv *il = context;
	struct il_ucode_header *ucode;
	int err;
	struct il4965_firmware_pieces pieces;
	const unsigned int api_max = il->cfg->ucode_api_max;
	const unsigned int api_min = il->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	if (!ucode_raw) {
		if (il->fw_idx <= il->cfg->ucode_api_max)
			IL_ERR("request for firmware file '%s' failed.\n",
			       il->firmware_name);
		goto try_again;
	}

	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
	       ucode_raw->size);

	/* Make sure we got at least the API version number. */
	if (ucode_raw->size < 4) {
		IL_ERR("File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file: header followed by uCode images. */
	ucode = (struct il_ucode_header *)ucode_raw->data;

	err = il4965_load_firmware(il, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IL_UCODE_API(il->ucode_ver);

	/* The API version from the firmware header is authoritative;
	 * reject anything outside the supported [api_min, api_max]
	 * range. */
	if (api_ver < api_min || api_ver > api_max) {
		IL_ERR("Driver unable to support your firmware API. "
		       "Driver supports v%u, firmware is v%u.\n", api_max,
		       api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IL_ERR("Firmware has old API version. Expected v%u, "
		       "got v%u. New firmware can be obtained "
		       "from http://www.intellinuxwireless.org.\n", api_max,
		       api_ver);

	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));

	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
		 IL_UCODE_SERIAL(il->ucode_ver));

	/* Dump the piece sizes for debugging. */
	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
	D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
	D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
	D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
	D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
	D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);

	/* Verify that each piece fits the hardware limits. */
	if (pieces.inst_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode instr len %Zd too large to fit in\n",
		       pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode data len %Zd too large to fit in\n",
		       pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode init instr len %Zd too large to fit in\n",
		       pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode init data len %Zd too large to fit in\n",
		       pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > il->hw_params.max_bsm_size) {
		IL_ERR("uCode boot instr len %Zd too large to fit in\n",
		       pieces.boot_size);
		goto try_again;
	}

	/* Allocate DMA-coherent buffers for the runtime image; the data
	 * area gets a backup copy used for verification/restore. */
	il->ucode_code.len = pieces.inst_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);

	il->ucode_data.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);

	il->ucode_data_backup.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);

	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
	    !il->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization-image buffers (optional sections). */
	if (pieces.init_size && pieces.init_data_size) {
		il->ucode_init.len = pieces.init_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);

		il->ucode_init_data.len = pieces.init_data_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);

		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (BSM) image buffer (optional section). */
	if (pieces.boot_size) {
		il->ucode_boot.len = pieces.boot_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);

		if (!il->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that firmware is validated, fix driver capabilities. */
	il->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy the images into the DMA buffers; actual loading into the
	 * device happens later. */
	D_INFO("Copying (but not loading) uCode instr len %Zd\n",
	       pieces.inst_size);
	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);

	/* Runtime data, duplicated into the backup buffer. */
	D_INFO("Copying (but not loading) uCode data len %Zd\n",
	       pieces.data_size);
	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions, if present. */
	if (pieces.init_size) {
		D_INFO("Copying (but not loading) init instr len %Zd\n",
		       pieces.init_size);
		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data, if present. */
	if (pieces.init_data_size) {
		D_INFO("Copying (but not loading) init data len %Zd\n",
		       pieces.init_data_size);
		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions. */
	D_INFO("Copying (but not loading) boot instr len %Zd\n",
	       pieces.boot_size);
	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/* Phy calibration command numbers follow the standard table. */
	il->_4965.phy_calib_chain_noise_reset_cmd =
	    standard_phy_calibration_size;
	il->_4965.phy_calib_chain_noise_gain_cmd =
	    standard_phy_calibration_size + 1;

	/* Register with mac80211, then expose debugfs and sysfs; a
	 * debugfs failure is only logged, a sysfs failure unbinds. */
	err = il4965_mac_setup_register(il, max_probe_length);
	if (err)
		goto out_unbind;

	err = il_dbgfs_register(il, DRV_NAME);
	if (err)
		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
		       err);

	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, release the firmware blob. */
	release_firmware(ucode_raw);
	complete(&il->_4965.firmware_loading_complete);
	return;

try_again:
	/* Try the next lower firmware API index; if none is left, the
	 * request call fails and we unbind. */
	if (il4965_request_firmware(il, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

err_pci_alloc:
	IL_ERR("failed to allocate pci memory\n");
	il4965_dealloc_ucode_pci(il);
out_unbind:
	complete(&il->_4965.firmware_loading_complete);
	device_release_driver(&il->pci_dev->dev);
	release_firmware(ucode_raw);
}
5031
/* Human-readable names for the low-numbered uCode error codes found in
 * the device error log; indexed directly by error number (see
 * il4965_desc_lookup()). */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
5062
5063static struct {
5064 char *name;
5065 u8 num;
5066} advanced_lookup[] = {
5067 {
5068 "NMI_INTERRUPT_WDG", 0x34}, {
5069 "SYSASSERT", 0x35}, {
5070 "UCODE_VERSION_MISMATCH", 0x37}, {
5071 "BAD_COMMAND", 0x38}, {
5072 "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
5073 "FATAL_ERROR", 0x3D}, {
5074 "NMI_TRM_HW_ERR", 0x46}, {
5075 "NMI_INTERRUPT_TRM", 0x4C}, {
5076 "NMI_INTERRUPT_BREAK_POINT", 0x54}, {
5077 "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
5078 "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
5079 "NMI_INTERRUPT_HOST", 0x66}, {
5080 "NMI_INTERRUPT_ACTION_PT", 0x7C}, {
5081 "NMI_INTERRUPT_UNKNOWN", 0x84}, {
5082 "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
5083"ADVANCED_SYSASSERT", 0},};
5084
5085static const char *
5086il4965_desc_lookup(u32 num)
5087{
5088 int i;
5089 int max = ARRAY_SIZE(desc_lookup_text);
5090
5091 if (num < max)
5092 return desc_lookup_text[num];
5093
5094 max = ARRAY_SIZE(advanced_lookup) - 1;
5095 for (i = 0; i < max; i++) {
5096 if (advanced_lookup[i].num == num)
5097 break;
5098 }
5099 return advanced_lookup[i].name;
5100}
5101
/* Layout of the uCode error-event table in device memory: one leading
 * count word, followed by 7-word log entries. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
5104
/*
 * Dump the uCode error-event table from device (SRAM) memory to the log.
 * Called after a firmware error to aid post-mortem debugging; reads fixed
 * word offsets defined by the 4965 uCode error-table layout.
 */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* The error table lives at a different address for init vs runtime
	 * uCode; take the pointer from the matching ALIVE response. */
	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->ops->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* First word of the table is the number of logged entries. */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* Fixed word offsets within the uCode error event table. */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc Time "
	       "data1 data2 line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
5152
/*
 * Configure the critical-temperature (CT) kill threshold in the uCode:
 * above this temperature the device shuts its radio off to protect itself.
 */
static void
il4965_rf_kill_ct_config(struct il_priv *il)
{
	struct il_ct_kill_config cmd;
	unsigned long flags;
	int ret = 0;

	/* Ack/clear any stale "exited CT kill" status first. */
	spin_lock_irqsave(&il->lock, flags);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&il->lock, flags);

	/* Threshold comes from EEPROM-derived hw_params, in raw R units. */
	cmd.critical_temperature_R =
	    cpu_to_le32(il->hw_params.ct_kill_threshold);

	ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
	if (ret)
		IL_ERR("C_CT_KILL_CONFIG failed\n");
	else
		D_INFO("C_CT_KILL_CONFIG " "succeeded, "
		       "critical temperature is %d\n",
		       il->hw_params.ct_kill_threshold);
}
5176
/* Default mapping of Tx queue number -> HW FIFO: four AC queues, the host
 * command queue, and two unused slots (must have exactly 7 entries — see
 * the BUILD_BUG_ON in il4965_alive_notify()). */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};

/* Bitmask with bits lo..hi (inclusive) set. */
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
5188
/*
 * Initialize the 4965 Tx scheduler after the runtime uCode reports ALIVE:
 * clears scheduler SRAM, programs byte-count tables and DMA channels, and
 * activates the default queue -> FIFO mapping. Runs with il->lock held
 * (irqsave) for the whole register sequence. Always returns 0.
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear 4965's internal Tx scheduler data/context/translate SRAM. */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tell the scheduler where the byte-count tables live (the DMA
	 * address is shifted because the register takes a 1KB-aligned
	 * value). */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA channels, with credit flow control. */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH "chicken bits" to enable scheduler-driven auto retry. */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chaining for all queues. */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* Reset scheduler read pointer and driver write pointer. */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Per-queue context, word 1: window size. */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Per-queue context, word 2: frame limit. */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Enable scheduling on queues 0..6. */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* Reset software-side queue bookkeeping. */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* Reset to 0; activated per-queue below. */
	il->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
5288
5289
5290
5291
5292
5293
/*
 * Finish device bring-up after the runtime uCode reports ALIVE: verify the
 * load, initialize the scheduler, restore RXON state and kick off power /
 * thermal configuration. On any failure the restart worker is queued.
 * Called from the alive_start delayed work with il->mutex held.
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* uCode declared itself invalid — full restart needed. */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Verify the image in device memory matches what we loaded. */
	if (il4965_verify_ucode(il)) {
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* uCode is alive; mark it so. */
	set_bit(S_ALIVE, &il->status);

	/* Enable watchdog to catch stuck Tx queues. */
	il_setup_watchdog(il);

	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;
		/* Re-associate: apply staging with ASSOC set, but clear it
		 * from the "active" copy so the commit actually happens. */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Fresh start: build default RXON config. */
		il_connection_init_rx_config(il);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* Configure bluetooth coexistence. */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Push the RXON configuration to the device. */
	il_commit_rxon(il);

	/* Program critical-temperature RF-kill threshold. */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
5375
5376static void il4965_cancel_deferred_work(struct il_priv *il);
5377
/*
 * Tear down the device: stop scans, timers, stations, interrupts, DMA,
 * and put the hardware into low-power state. Caller must hold il->mutex
 * (see il4965_down()); the statement order here is significant.
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	/* Remember whether EXIT_PENDING was already set (real unload)
	 * versus set here just to quiesce deferred work. */
	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop Tx watchdog; it must not fire during teardown. */
	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il);

	/* Wipe all key material under the station lock. */
	spin_lock_irq(&il->sta_lock);

	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
	il->_4965.key_mapping_keys = 0;
	spin_unlock_irq(&il->sta_lock);

	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock anyone waiting on host-command completion. */
	wake_up_all(&il->wait_command_queue);

	/* If we set EXIT_PENDING ourselves, clear it again now that the
	 * deferred work has been quiesced. */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* Stop and reset the on-board processor. */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* Disable and drain interrupts. */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If HW never finished init there is nothing more to shut down;
	 * just rebuild status from the sticky bits that survive a down. */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* Keep only the status bits that survive a down (incl. FW_ERROR). */
	il->status &=
	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/* Stop DMA and power the device down under the register lock. */
	spin_lock_irq(&il->reg_lock);

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);

	/* Power-down the busmaster DMA clocks. */
	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Release our request to keep the MAC awake. */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device and put it in low-power state. */
	_il_apm_stop(il);

	spin_unlock_irq(&il->reg_lock);

	il4965_txq_ctx_unmap(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* Free any beacon frames still on the free list. */
	il4965_clear_free_frames(il);
}
5480
/*
 * Locked wrapper around __il4965_down(). Deferred work is cancelled only
 * after the mutex is dropped, since some work items take the mutex
 * themselves.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
5490
5491
/*
 * Signal NIC_READY and poll (up to 100us) for the hardware to confirm.
 * Only ever sets il->hw_ready to true; the caller is responsible for
 * clearing it beforehand (see il4965_prepare_card_hw()).
 */
static void
il4965_set_hw_ready(struct il_priv *il)
{
	int ret;

	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it. */
	ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   100);
	if (ret >= 0)
		il->hw_ready = true;

	D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
}
5510
/*
 * Bring the card to the "HW ready" state: try a direct ready handshake
 * first, and if that fails request PREPARE and poll (up to 150ms) before
 * retrying. Result is reported via il->hw_ready.
 */
static void
il4965_prepare_card_hw(struct il_priv *il)
{
	int ret;

	il->hw_ready = false;

	il4965_set_hw_ready(il);
	if (il->hw_ready)
		return;

	/* If HW is not ready, ask it to prepare. */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);

	ret =
	    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready now; retry the handshake unless we timed out. */
	if (ret != -ETIMEDOUT)
		il4965_set_hw_ready(il);
}
5534
/* Maximum bootstrap-load attempts before giving up in __il4965_up(). */
#define MAX_HW_RESTARTS 5
5536
/*
 * Bring the NIC up: check preconditions, prepare the hardware, init NIC
 * resources, then try to load the bootstrap uCode up to MAX_HW_RESTARTS
 * times. Returns 0 on success (or when blocked by HW rfkill), negative
 * errno otherwise. Caller must hold il->mutex.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_ERR("HW not ready\n");
		return -EIO;
	}

	/* If the HW RF-kill switch is off we may proceed; otherwise record
	 * rfkill, re-enable its interrupt and report success without
	 * starting the uCode. */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else {
		set_bit(S_RFKILL, &il->status);
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		il_enable_rfkill_int(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* Must be done before anything else touches the cmd queue. */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		return ret;
	}

	/* Make sure rfkill handshake bits are cleared. */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* Clear (again), then enable host interrupts. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* Really make sure rfkill handshake bits are cleared.
	 * NOTE(review): the bit is cleared twice back-to-back — presumably
	 * intentional belt-and-suspenders; confirm against the reference
	 * driver before changing. */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used as the runtime image is modified by the
	 * device during operation. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* Load the bootstrap uCode; the runtime image is then
		 * pulled in by the device itself. */
		ret = il->ops->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* Start the card going. */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All attempts failed: run the teardown path once. */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* Cannot init the device without issue, so fail hard. */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
5635
5636
5637
5638
5639
5640
5641
5642static void
5643il4965_bg_init_alive_start(struct work_struct *data)
5644{
5645 struct il_priv *il =
5646 container_of(data, struct il_priv, init_alive_start.work);
5647
5648 mutex_lock(&il->mutex);
5649 if (test_bit(S_EXIT_PENDING, &il->status))
5650 goto out;
5651
5652 il->ops->init_alive_start(il);
5653out:
5654 mutex_unlock(&il->mutex);
5655}
5656
5657static void
5658il4965_bg_alive_start(struct work_struct *data)
5659{
5660 struct il_priv *il =
5661 container_of(data, struct il_priv, alive_start.work);
5662
5663 mutex_lock(&il->mutex);
5664 if (test_bit(S_EXIT_PENDING, &il->status))
5665 goto out;
5666
5667 il4965_alive_start(il);
5668out:
5669 mutex_unlock(&il->mutex);
5670}
5671
5672static void
5673il4965_bg_run_time_calib_work(struct work_struct *work)
5674{
5675 struct il_priv *il = container_of(work, struct il_priv,
5676 run_time_calib_work);
5677
5678 mutex_lock(&il->mutex);
5679
5680 if (test_bit(S_EXIT_PENDING, &il->status) ||
5681 test_bit(S_SCANNING, &il->status)) {
5682 mutex_unlock(&il->mutex);
5683 return;
5684 }
5685
5686 if (il->start_calib) {
5687 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
5688 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
5689 }
5690
5691 mutex_unlock(&il->mutex);
5692}
5693
/*
 * Workqueue handler for full device restart. A firmware error restarts
 * via mac80211 (ieee80211_restart_hw); otherwise the device is cycled
 * down/up directly. Locking is deliberately split: __il4965_down() needs
 * the mutex held, while il4965_cancel_deferred_work() must run without it.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		il4965_cancel_deferred_work(il);
		/* Let mac80211 drive the restart (it will call start()). */
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		mutex_lock(&il->mutex);
		/* Re-check: il4965_down() dropped the mutex. */
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
5724
5725static void
5726il4965_bg_rx_replenish(struct work_struct *data)
5727{
5728 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
5729
5730 if (test_bit(S_EXIT_PENDING, &il->status))
5731 return;
5732
5733 mutex_lock(&il->mutex);
5734 il4965_rx_replenish(il);
5735 mutex_unlock(&il->mutex);
5736}
5737
5738
5739
5740
5741
5742
5743
/* How long mac80211 start() waits for the uCode to report READY. */
#define UCODE_READY_TIMEOUT (4 * HZ)
5745
5746
5747
5748
5749
/*
 * Describe the device's capabilities to mac80211 and register the hw.
 * @max_probe_length: maximum probe-request size supported by the uCode;
 * bounds the scan IE length we advertise. Returns 0 or negative errno.
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics. */
	hw->flags =
	    IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
	    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
	    IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
	    IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
	if (il->cfg->sku & IL_SKU_N)
		hw->flags |=
		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |=
	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
	    WIPHY_FLAG_IBSS_RSN;

	/* Don't enable powersave by default. */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

	/* Probe request frame: 24-byte header + 2-byte SSID element head,
	 * the rest is available for IEs. */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Four EDCA AC queues. */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	/* Only advertise bands for which we actually have channels. */
	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		    &il->bands[IEEE80211_BAND_2GHZ];
	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		    &il->bands[IEEE80211_BAND_5GHZ];

	il_leds_init(il);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
5812
/*
 * mac80211 start() callback: bring the NIC up and wait for the uCode to
 * become READY (unless blocked by rfkill). Returns 0, negative errno from
 * __il4965_up(), or -ETIMEDOUT if the uCode never reported READY.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* We need this as the mac80211 callbacks are not serialized. */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	/* __il4965_up() returns 0 under HW rfkill without starting uCode;
	 * in that case just report the interface as open. */
	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for ALIVE processing (il4965_alive_start) to finish. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* Re-check READY: it may have been set after the timeout. */
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
5854
/*
 * mac80211 stop() callback: bring the device down, flush deferred work,
 * and leave the rfkill interrupt armed so switch changes still wake us.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	flush_workqueue(il->workqueue);

	/* User space software may expect getting rfkill changes even
	 * though the interface is down: clear interrupts and re-enable
	 * only the rfkill one. */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
5878
/*
 * mac80211 tx() callback: hand the frame to the Tx path; on failure the
 * skb is dropped here (mac80211 expects us to consume it either way).
 */
void
il4965_mac_tx(struct ieee80211_hw *hw,
	      struct ieee80211_tx_control *control,
	      struct sk_buff *skb)
{
	struct il_priv *il = hw->priv;

	D_MACDUMP("enter\n");

	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	/* Non-zero return means the frame was not queued; free it. */
	if (il4965_tx_skb(il, control->sta, skb))
		dev_kfree_skb_any(skb);

	D_MACDUMP("leave\n");
}
5896
/*
 * mac80211 update_tkip_key() callback: push a freshly derived TKIP
 * phase-1 key to the device so HW decryption keeps working after the
 * IV32 rollover.
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);

	D_MAC80211("leave\n");
}
5910
/*
 * mac80211 set_key() callback: install or remove a hardware crypto key.
 * WEP group keys with no station use the "default WEP key" slots; all
 * other keys use per-station dynamic key slots. Returns 0 or negative
 * errno (-EOPNOTSUPP when HW crypto is disabled or unsupported for the
 * key type).
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* In IBSS RSN, group keys are handled in software by mac80211;
	 * only pairwise keys go to the hardware. */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		D_MAC80211("leave - ad-hoc group key\n");
		return -EOPNOTSUPP;
	}

	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	/* If we are getting a WEP group key and we didn't receive any key
	 * mapping so far, we are in legacy-WEP mode: use the default key
	 * slots. Otherwise, or on removal, check the key's hw index. */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !il->_4965.key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = il4965_set_default_wep_key(il, key);
		else
			ret = il4965_set_dynamic_key(il, key, sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, key);
		else
			ret = il4965_remove_dynamic_key(il, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
5986
/*
 * mac80211 ampdu_action() callback: start/stop Rx and Tx aggregation
 * sessions for (sta, tid). Returns -EACCES on non-HT hardware; during
 * shutdown stop requests report success so mac80211 can tear down state.
 * ret stays -EINVAL for any unhandled action.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum ieee80211_ampdu_mlme_action action,
			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
			u8 buf_size)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		/* Pretend success while going down so mac80211 cleans up. */
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* Nothing to program; accept. */
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
6034
/*
 * mac80211 sta_add() callback: add a station to the device's station
 * table and initialize rate scaling for it. Returns 0 or negative errno.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	/* On a STATION-type vif the peer we're adding is our AP. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling. */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
6070
/*
 * mac80211 channel_switch() callback: follow the AP to a new channel
 * announced via a CSA. Validates preconditions, updates the staging RXON
 * and HT config, then asks the device to perform the switch. On device
 * failure the switch is reported to mac80211 as unsuccessful.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->chandef.chan;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	/* Channel switch only makes sense while associated. */
	if (!il_is_associated(il))
		goto out;

	if (!il->ops->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* Already on the requested channel — nothing to do. */
	if (le16_to_cpu(il->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 in the new channel's configuration. */
	switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
	case NL80211_CHAN_NO_HT:
	case NL80211_CHAN_HT20:
		il->ht.is_40mhz = false;
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
		break;
	case NL80211_CHAN_HT40MINUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		il->ht.is_40mhz = true;
		break;
	case NL80211_CHAN_HT40PLUS:
		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		il->ht.is_40mhz = true;
		break;
	}

	/* Band changed: reset per-band RXON flags. */
	if ((le16_to_cpu(il->staging.channel) != ch))
		il->staging.flags = 0;

	il_set_rxon_channel(il, channel);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, channel->band, il->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);

	/* At this point, staging_rxon has the configuration for the new
	 * channel. Mark the switch pending and hand off to the device;
	 * roll back on failure. */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->ops->set_channel_switch(il, ch_switch)) {
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(il->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
6157
/*
 * mac80211 configure_filter() callback: translate requested FIF_* receive
 * filter flags into RXON filter bits on the staging config. The staging
 * config is NOT committed here — NOTE(review): presumably it is applied on
 * the next RXON commit; confirm against il_commit_rxon() callers.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* Accumulate bits to set (filter_or) and bits to clear (filter_nand). */
#define CHK(test, flag) do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors. */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	mutex_unlock(&il->mutex);

	/* Report back to mac80211 which flags we actually honor; the
	 * firmware receives all multicast and beacon frames anyway. */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
6204
6205
6206
6207
6208
6209
6210
6211static void
6212il4965_bg_txpower_work(struct work_struct *work)
6213{
6214 struct il_priv *il = container_of(work, struct il_priv,
6215 txpower_work);
6216
6217 mutex_lock(&il->mutex);
6218
6219
6220
6221
6222
6223 if (test_bit(S_EXIT_PENDING, &il->status) ||
6224 test_bit(S_SCANNING, &il->status))
6225 goto out;
6226
6227
6228
6229
6230 il->ops->send_tx_power(il);
6231
6232
6233
6234 il->last_temperature = il->temperature;
6235out:
6236 mutex_unlock(&il->mutex);
6237}
6238
/*
 * Allocate the driver workqueue and wire up all deferred work items,
 * timers and the IRQ tasklet. Counterpart of il4965_cancel_deferred_work().
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	/* NOTE(review): create_singlethread_workqueue() can return NULL and
	 * the result is not checked here; a later queue_work() would then
	 * dereference NULL. Fixing requires changing this function's void
	 * interface — confirm and fix with the caller. */
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	init_timer(&il->stats_periodic);
	il->stats_periodic.data = (unsigned long)il;
	il->stats_periodic.function = il4965_bg_stats_periodic;

	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
6268
/*
 * Cancel all deferred work set up by il4965_setup_deferred_work().
 * Must be called without il->mutex held: several of these work items
 * take the mutex themselves (sync cancel would deadlock otherwise).
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	/* Non-sync cancel: alive_start may still be running after this.
	 * NOTE(review): presumably safe because callers also flush the
	 * workqueue or hold EXIT_PENDING — confirm. */
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
6281
6282static void
6283il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
6284{
6285 int i;
6286
6287 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
6288 rates[i].bitrate = il_rates[i].ieee * 5;
6289 rates[i].hw_value = i;
6290 rates[i].hw_value_short = i;
6291 rates[i].flags = 0;
6292 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
6293
6294
6295
6296 rates[i].flags |=
6297 (il_rates[i].plcp ==
6298 RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
6299 }
6300 }
6301}
6302
6303
6304
6305
/*
 * Set both the driver's write pointer (HBUS) and the scheduler's read
 * pointer for Tx queue @txq_id to @idx.
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
6312
/*
 * Program the scheduler's status register for one Tx queue: which FIFO
 * it drains into, whether it is active, and whether scheduler-retry
 * (aggregation/BA) mode is enabled.
 */
void
il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
			   int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Queue is only "active" if it was activated via txq_ctx_active_msk. */
	int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate the queue's scheduler status bits. */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		   IL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
	       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
6335
/* mac80211 callback table for the 4965. il_mac_* handlers are shared
 * iwlegacy code; il4965_* handlers are device-specific. */
static const struct ieee80211_ops il4965_mac_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
	.flush = il_mac_flush,
};
6358
6359static int
6360il4965_init_drv(struct il_priv *il)
6361{
6362 int ret;
6363
6364 spin_lock_init(&il->sta_lock);
6365 spin_lock_init(&il->hcmd_lock);
6366
6367 INIT_LIST_HEAD(&il->free_frames);
6368
6369 mutex_init(&il->mutex);
6370
6371 il->ieee_channels = NULL;
6372 il->ieee_rates = NULL;
6373 il->band = IEEE80211_BAND_2GHZ;
6374
6375 il->iw_mode = NL80211_IFTYPE_STATION;
6376 il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
6377 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
6378
6379
6380 il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
6381
6382
6383 if (il->ops->set_rxon_chain)
6384 il->ops->set_rxon_chain(il);
6385
6386 il_init_scan_params(il);
6387
6388 ret = il_init_channel_map(il);
6389 if (ret) {
6390 IL_ERR("initializing regulatory failed: %d\n", ret);
6391 goto err;
6392 }
6393
6394 ret = il_init_geos(il);
6395 if (ret) {
6396 IL_ERR("initializing geos failed: %d\n", ret);
6397 goto err_free_channel_map;
6398 }
6399 il4965_init_hw_rates(il, il->ieee_rates);
6400
6401 return 0;
6402
6403err_free_channel_map:
6404 il_free_channel_map(il);
6405err:
6406 return ret;
6407}
6408
/*
 * il4965_uninit_drv - release resources allocated by il4965_init_drv()
 * (geos and channel map), plus the scan command buffer, which may be
 * NULL if no scan was ever issued (kfree(NULL) is a no-op).
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
6416
/*
 * il4965_hw_detect - cache hardware revision identifiers: the CSR
 * hardware revision registers and the PCI revision ID.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
6425
/*
 * Sensitivity calibration bounds for the 4965.  These are raw threshold
 * values consumed by the common sensitivity-calibration code (energy
 * levels for CCK/OFDM detection and auto-correlation limits, with and
 * without MRC).  Presumably taken from Intel's calibration data for
 * this silicon — do not tune without hardware documentation.
 */
static struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
6452
/*
 * il4965_set_hw_params - populate il->hw_params with 4965 capabilities,
 * applying the amsdu_size_8K, 11n_disable and queues_num module
 * parameters on top of the static configuration.
 */
static void
il4965_set_hw_params(struct il_priv *il)
{
	il->hw_params.bcast_id = IL4965_BROADCAST_ID;
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* 8K RX buffers are only needed to receive 8K A-MSDUs */
	if (il->cfg->mod_params->amsdu_size_8K)
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
	else
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);

	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;

	/* NOTE(review): this writes through il->cfg; if the cfg struct is
	 * shared between probed devices this mutation leaks across them —
	 * confirm whether cfg is per-device here. */
	if (il->cfg->mod_params->disable_11n)
		il->cfg->sku &= ~IL_SKU_N;

	/* Honor queues_num= only when it is within the supported range */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);

	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* HT40 is supported on the 5 GHz band only */
	il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il->hw_params.ct_kill_threshold =
	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
}
6500
/*
 * il4965_pci_probe - PCI probe entry point.
 *
 * Allocates the mac80211 hw (with il_priv embedded as its priv area),
 * sets up PCI/DMA/MMIO access, resets and identifies the hardware,
 * reads the EEPROM, initializes driver state and interrupts, and
 * finally starts asynchronous firmware loading.  mac80211 registration
 * happens later, from the firmware-completion path started by
 * il4965_request_firmware() — which is also why the error unwinding
 * here stops at the workqueue/IRQ level.
 *
 * The sequence below is strictly ordered; do not reorder steps.
 */
static int
il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd;

	/************************
	 * 1. Allocating HW data
	 ************************/
	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	il->hw = hw;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->ops = &il4965_ops;
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il->debugfs_ops = &il4965_debugfs_ops;
#endif
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/*
	 * Disable PCIe link power-saving states for this device
	 * (L0s/L1/clock PM) before enabling it.
	 */
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	/***************************
	 * 2. Initializing PCI bus
	 ***************************/
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* The 4965 can address 36 bits of DMA; fall back to 32 bits */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err =
			    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		/* both attempts failed: bail out */
		if (err) {
			IL_WARN("No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, il);

	/***********************
	 * 3. Read REV register
	 ***********************/
	il->hw_base = pci_ioremap_bar(pdev, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* These locks must exist before the first register access below */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * Put the device into a known state before reading the
	 * revision registers.
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	il4965_hw_detect(il);
	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);

	/* Clear the retry timeout so PCI Tx retries on lost/garbled
	 * responses don't get capped (see similar comment in iwlwifi). */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_WARN("Failed, HW not ready\n");
		err = -EIO;
		goto out_iounmap;
	}

	/*****************
	 * 4. Read EEPROM
	 *****************/
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = il4965_eeprom_check_version(il);
	if (err)
		goto out_free_eeprom;

	/* MAC address comes from the EEPROM; expose it to cfg80211 */
	il4965_eeprom_get_mac(il, il->addresses[0].addr);
	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
	il->hw->wiphy->addresses = il->addresses;
	il->hw->wiphy->n_addresses = 1;

	/************************
	 * 5. Setup HW constants
	 ************************/
	il4965_set_hw_params(il);

	/*******************
	 * 6. Setup il state
	 *******************/
	err = il4965_init_drv(il);
	if (err)
		goto out_free_eeprom;

	/********************
	 * 7. Setup services
	 ********************/
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	/* NOTE(review): pci_enable_msi() result is deliberately ignored —
	 * on failure the device presumably falls back to legacy INTx,
	 * which il_isr handles too; confirm. */
	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	/* creates il->workqueue, timers and work items */
	il4965_setup_deferred_work(il);
	il4965_setup_handlers(il);

	/*
	 * Some hardware/bridges leave INTX disabled after MSI setup;
	 * re-enable INTx so the shared/legacy interrupt path works.
	 */
	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
	}

	il_enable_rfkill_int(il);

	/* If the GP_CNTRL bit is set, the RF kill switch is not engaged */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else
		set_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy,
				  test_bit(S_RFKILL, &il->status));

	il_power_initialize(il);

	/* completed from the firmware-load callback; remove() waits on it */
	init_completion(&il->_4965.firmware_loading_complete);

	err = il4965_request_firmware(il, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

	/* Error unwinding: reverse order of the setup steps above */
out_destroy_workqueue:
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il4965_uninit_drv(il);
out_free_eeprom:
	il_eeprom_free(il);
out_iounmap:
	iounmap(il->hw_base);
out_pci_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	ieee80211_free_hw(il->hw);
out:
	return err;
}
6712
/*
 * il4965_pci_remove - PCI remove entry point; tears down everything
 * il4965_pci_probe() and the firmware-load callback set up, in reverse.
 * Waits for any in-flight asynchronous firmware load to finish first so
 * the two paths cannot race.  The teardown order below is significant.
 */
static void
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* make sure the async firmware-loading path has completed */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/*
	 * Setting S_EXIT_PENDING stops new work from being queued and
	 * makes in-progress paths bail out early.
	 */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		/* unregister_hw stops the device via the mac80211 ops */
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/*
	 * Even if the interface was never brought up, stop the APM so
	 * the device is left in a low-power, quiescent state.
	 */
	il_apm_stop(il);

	/* make sure the device raises no further interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/* drain, then destroy, the deferred-work queue */
	flush_workqueue(il->workqueue);

	/*
	 * IRQ is freed afterwards, so the handler must not queue work
	 * past this point (S_EXIT_PENDING above guards that).
	 */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	il4965_uninit_drv(il);

	/* dev_kfree_skb(NULL) is safe */
	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
6792
6793
6794
6795
6796
/*
 * il4965_txq_set_sched - write the Tx scheduler activation mask
 * (SCD_TXFACT); presumably one bit per DMA channel — a zero mask
 * quiesces the scheduler.
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
6802
6803
6804
6805
6806
6807
6808
6809
/* PCI device IDs handled by this driver (4965AGN, any subsystem ID) */
static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}	/* terminator */
};
MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
6816
/* PCI driver glue; PM callbacks come from the shared iwlegacy core */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = il4965_pci_remove,
	.driver.pm = IL_LEGACY_PM_OPS,
};
6824
6825static int __init
6826il4965_init(void)
6827{
6828
6829 int ret;
6830 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6831 pr_info(DRV_COPYRIGHT "\n");
6832
6833 ret = il4965_rate_control_register();
6834 if (ret) {
6835 pr_err("Unable to register rate control algorithm: %d\n", ret);
6836 return ret;
6837 }
6838
6839 ret = pci_register_driver(&il4965_driver);
6840 if (ret) {
6841 pr_err("Unable to initialize PCI module\n");
6842 goto error_register;
6843 }
6844
6845 return ret;
6846
6847error_register:
6848 il4965_rate_control_unregister();
6849 return ret;
6850}
6851
/*
 * il4965_exit - module exit point; reverse order of il4965_init():
 * unbind all devices first, then drop the rate-control algorithm.
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
6858
module_exit(il4965_exit);
module_init(il4965_init);

#ifdef CONFIG_IWLEGACY_DEBUG
/* debug= bitmask selecting which D_* categories are printed */
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Module parameters; all are read-only after load (S_IRUGO) and are
 * consumed through il4965_mod_params / il->cfg->mod_params. */
module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
		   S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
6878